# Copyright 2024 Susung Hong and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any, Callable, Dict, List, Optional, Union import torch import torch.nn.functional as F from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import ( USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion import StableDiffusionPipelineOutput from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import StableDiffusionSAGPipeline >>> pipe = StableDiffusionSAGPipeline.from_pretrained( ... "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16 ... ) >>> pipe = pipe.to("cuda") >>> prompt = "a photo of an astronaut riding a horse on mars" >>> image = pipe(prompt, sag_scale=0.75).images[0] ``` """ # processes and stores attention probabilities class CrossAttnStoreProcessor: def __init__(self): self.attention_probs = None def __call__( self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None, ): batch_size, sequence_length, _ = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) self.attention_probs = attn.get_attention_scores(query, key, attention_mask) hidden_states = torch.bmm(self.attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) return hidden_states # Modified to get self-attention guidance scale in this paper (https://arxiv.org/pdf/2210.00939.pdf) as an input class StableDiffusionSAGPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, IPAdapterMixin): r""" Pipeline for text-to-image generation using Stable Diffusion. This model inherits from [`DiffusionPipeline`]. 
Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). The pipeline also inherits the following loading methods: - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. text_encoder ([`~transformers.CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer ([`~transformers.CLIPTokenizer`]): A `CLIPTokenizer` to tokenize text. unet ([`UNet2DConditionModel`]): A `UNet2DConditionModel` to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details about a model's potential harms. feature_extractor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. """ model_cpu_offload_seq = "text_encoder->unet->vae" _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] _exclude_from_cpu_offload = ["safety_checker"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, image_encoder: Optional[CLIPVisionModelWithProjection] = None, requires_safety_checker: bool = True, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, image_encoder=image_encoder, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, lora_scale: Optional[float] = None, **kwargs, ): deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." 
deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt( prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs, ) # concatenate for backwards comp prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt def encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. 
""" # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, LoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True ) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the # representations. The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer. prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." 
) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) return prompt_embeds, negative_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors="pt").pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder( torch.zeros_like(image), output_hidden_states=True ).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( num_images_per_prompt, dim=0 ) return image_enc_hidden_states, uncond_image_enc_hidden_states else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return image_embeds, uncond_image_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, 
clip_input=safety_checker_input.pixel_values.to(dtype) ) return image, has_nsfw_concept # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents def decode_latents(self, latents): deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs # Copied from diffusers.pipelines.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.StableDiffusionKDiffusionPipeline.check_inputs def check_inputs( self, prompt, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, sag_scale: float = 0.75, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, ip_adapter_image: Optional[PipelineImageInput] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, clip_skip: Optional[int] = None, ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. sag_scale (`float`, *optional*, defaults to 0.75): Chosen between [0, 1.0] for better quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. 
eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. Examples: Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. """ # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds ) # 2. 
Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # and `sag_scale` is` `s` of equation (16) # of the self-attention guidance paper: https://arxiv.org/pdf/2210.00939.pdf # `sag_scale = 0` means no self-attention guidance do_self_attention_guidance = sag_scale > 0.0 if ip_adapter_image is not None: output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True image_embeds, negative_image_embeds = self.encode_image( ip_adapter_image, device, num_images_per_prompt, output_hidden_state ) if do_classifier_free_guidance: image_embeds = torch.cat([negative_image_embeds, image_embeds]) # 3. Encode input prompt prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, clip_skip=clip_skip, ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps if timesteps.dtype not in [torch.int16, torch.int32, torch.int64]: raise ValueError( f"{self.__class__.__name__} does not support using a scheduler of type {self.scheduler.__class__.__name__}. Please make sure to use one of 'DDIMScheduler, PNDMScheduler, DDPMScheduler, DEISMultistepScheduler, UniPCMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler'." ) # 5. Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, ) # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 6.1 Add image embeds for IP-Adapter added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None added_uncond_kwargs = {"image_embeds": negative_image_embeds} if ip_adapter_image is not None else None # 7. 
Denoising loop store_processor = CrossAttnStoreProcessor() self.unet.mid_block.attentions[0].transformer_blocks[0].attn1.processor = store_processor num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order map_size = None def get_map_size(module, input, output): nonlocal map_size map_size = output[0].shape[-2:] with self.unet.mid_block.attentions[0].register_forward_hook(get_map_size): with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, ).sample # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform self-attention guidance with the stored self-attention map if do_self_attention_guidance: # classifier-free guidance produces two chunks of attention map # and we only use unconditional one according to equation (25) # in https://arxiv.org/pdf/2210.00939.pdf if do_classifier_free_guidance: # DDIM-like prediction of x0 pred_x0 = self.pred_x0(latents, noise_pred_uncond, t) # get the stored attention maps uncond_attn, cond_attn = store_processor.attention_probs.chunk(2) # self-attention-based degrading of latents degraded_latents = self.sag_masking( pred_x0, uncond_attn, map_size, t, self.pred_epsilon(latents, noise_pred_uncond, t) ) uncond_emb, _ = prompt_embeds.chunk(2) # forward and give guidance degraded_pred = self.unet( degraded_latents, t, encoder_hidden_states=uncond_emb, added_cond_kwargs=added_uncond_kwargs, ).sample noise_pred += sag_scale * (noise_pred_uncond - degraded_pred) else: # DDIM-like prediction of x0 pred_x0 = self.pred_x0(latents, noise_pred, t) # get the stored attention maps cond_attn = store_processor.attention_probs # self-attention-based degrading of latents degraded_latents = self.sag_masking( pred_x0, cond_attn, map_size, t, self.pred_epsilon(latents, noise_pred, t) ) # forward and give guidance degraded_pred = self.unet( degraded_latents, t, encoder_hidden_states=prompt_embeds, added_cond_kwargs=added_cond_kwargs, ).sample noise_pred += sag_scale * (noise_pred - degraded_pred) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) self.maybe_free_model_hooks() if not return_dict: 
return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) def sag_masking(self, original_latents, attn_map, map_size, t, eps): # Same masking process as in SAG paper: https://arxiv.org/pdf/2210.00939.pdf bh, hw1, hw2 = attn_map.shape b, latent_channel, latent_h, latent_w = original_latents.shape h = self.unet.config.attention_head_dim if isinstance(h, list): h = h[-1] # Produce attention mask attn_map = attn_map.reshape(b, h, hw1, hw2) attn_mask = attn_map.mean(1, keepdim=False).sum(1, keepdim=False) > 1.0 attn_mask = ( attn_mask.reshape(b, map_size[0], map_size[1]) .unsqueeze(1) .repeat(1, latent_channel, 1, 1) .type(attn_map.dtype) ) attn_mask = F.interpolate(attn_mask, (latent_h, latent_w)) # Blur according to the self-attention mask degraded_latents = gaussian_blur_2d(original_latents, kernel_size=9, sigma=1.0) degraded_latents = degraded_latents * attn_mask + original_latents * (1 - attn_mask) # Noise it again to match the noise level degraded_latents = self.scheduler.add_noise(degraded_latents, noise=eps, timesteps=t[None]) return degraded_latents # Modified from diffusers.schedulers.scheduling_ddim.DDIMScheduler.step # Note: there are some schedulers that clip or do not return x_0 (PNDMScheduler, DDIMScheduler, etc.) def pred_x0(self, sample, model_output, timestep): alpha_prod_t = self.scheduler.alphas_cumprod[timestep].to(sample.device) beta_prod_t = 1 - alpha_prod_t if self.scheduler.config.prediction_type == "epsilon": pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) elif self.scheduler.config.prediction_type == "sample": pred_original_sample = model_output elif self.scheduler.config.prediction_type == "v_prediction": pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output # predict V model_output = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample else: raise ValueError( f"prediction_type given as {self.scheduler.config.prediction_type} must be one of `epsilon`, `sample`," " or `v_prediction`" ) return pred_original_sample def pred_epsilon(self, sample, model_output, timestep): alpha_prod_t = self.scheduler.alphas_cumprod[timestep] beta_prod_t = 1 - alpha_prod_t if self.scheduler.config.prediction_type == "epsilon": pred_eps = model_output elif self.scheduler.config.prediction_type == "sample": pred_eps = (sample - (alpha_prod_t**0.5) * model_output) / (beta_prod_t**0.5) elif self.scheduler.config.prediction_type == "v_prediction": pred_eps = (beta_prod_t**0.5) * sample + (alpha_prod_t**0.5) * model_output else: raise ValueError( f"prediction_type given as {self.scheduler.config.prediction_type} must be one of `epsilon`, `sample`," " or `v_prediction`" ) return pred_eps # Gaussian blur def gaussian_blur_2d(img, kernel_size, sigma): ksize_half = (kernel_size - 1) * 0.5 x = torch.linspace(-ksize_half, ksize_half, steps=kernel_size) pdf = torch.exp(-0.5 * (x / sigma).pow(2)) x_kernel = pdf / pdf.sum() x_kernel = x_kernel.to(device=img.device, dtype=img.dtype) kernel2d = torch.mm(x_kernel[:, None], x_kernel[None, :]) kernel2d = kernel2d.expand(img.shape[-3], 1, kernel2d.shape[0], kernel2d.shape[1]) padding = [kernel_size // 2, kernel_size // 2, kernel_size // 2, kernel_size // 2] img = F.pad(img, padding, mode="reflect") img = F.conv2d(img, kernel2d, groups=img.shape[-3]) return img
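The `sag_masking` / `gaussian_blur_2d` pair above is easiest to follow on toy tensors. The following standalone sketch uses random tensors as stand-ins for `pred_x0` and the stored mid-block self-attention probabilities, and `blur_2d` is a local re-implementation for illustration rather than a diffusers API; it thresholds the attention map, upsamples it to latent resolution, and blends in a Gaussian-blurred copy of the latents, mirroring the shapes the pipeline uses for a 512x512 SD v1.5 generation:

```py
import torch
import torch.nn.functional as F


def blur_2d(x, kernel_size=9, sigma=1.0):
    # separable Gaussian kernel, applied depthwise (same recipe as gaussian_blur_2d above)
    half = (kernel_size - 1) * 0.5
    grid = torch.linspace(-half, half, steps=kernel_size)
    pdf = torch.exp(-0.5 * (grid / sigma).pow(2))
    k1d = pdf / pdf.sum()
    k2d = torch.mm(k1d[:, None], k1d[None, :]).to(x)
    k2d = k2d.expand(x.shape[-3], 1, kernel_size, kernel_size)
    x = F.pad(x, [kernel_size // 2] * 4, mode="reflect")
    return F.conv2d(x, k2d, groups=x.shape[-3])


b, c, heads = 1, 4, 8
latent_h = latent_w = 64          # SD v1.5 latent resolution for 512x512 images
map_h = map_w = latent_h // 8     # mid-block self-attention map resolution

latents = torch.randn(b, c, latent_h, latent_w)  # stands in for pred_x0
attn = torch.softmax(torch.randn(b * heads, map_h * map_w, map_h * map_w), dim=-1)

# average over heads, sum attention received per key, threshold at 1.0
attn = attn.reshape(b, heads, map_h * map_w, map_h * map_w)
mask = attn.mean(1).sum(1) > 1.0
mask = mask.reshape(b, map_h, map_w)[:, None].repeat(1, c, 1, 1).float()
mask = F.interpolate(mask, (latent_h, latent_w))

# blur only the strongly-attended regions, keep the rest untouched
degraded = blur_2d(latents) * mask + latents * (1 - mask)
print(degraded.shape)  # torch.Size([1, 4, 64, 64])
```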
# Source: diffusers/src/diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py
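For `prediction_type == "epsilon"`, the `pred_x0` and `pred_epsilon` helpers in the SAG pipeline above are inverses of the DDIM forward relation x_t = sqrt(alpha_prod_t) * x_0 + sqrt(1 - alpha_prod_t) * eps. A quick numeric check of that identity on plain tensors (no scheduler involved; `alpha_prod_t` is a stand-in for `scheduler.alphas_cumprod[t]`):

```py
import torch

alpha_prod_t = torch.tensor(0.7)  # stands in for scheduler.alphas_cumprod[t]
x_t = torch.randn(1, 4, 8, 8)
eps = torch.randn(1, 4, 8, 8)

# pred_x0 for prediction_type == "epsilon"
x0 = (x_t - (1 - alpha_prod_t).sqrt() * eps) / alpha_prod_t.sqrt()

# recombining x0 and eps recovers the noisy sample
assert torch.allclose(alpha_prod_t.sqrt() * x0 + (1 - alpha_prod_t).sqrt() * eps, x_t, atol=1e-6)
```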
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import torch from transformers import CLIPTextModel, CLIPTokenizer from ...image_processor import VaeImageProcessor from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, UNet3DConditionModel from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import ( USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from . import TextToVideoSDPipelineOutput logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import TextToVideoSDPipeline >>> from diffusers.utils import export_to_video >>> pipe = TextToVideoSDPipeline.from_pretrained( ... "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16, variant="fp16" ... ) >>> pipe.enable_model_cpu_offload() >>> prompt = "Spiderman is surfing" >>> video_frames = pipe(prompt).frames[0] >>> video_path = export_to_video(video_frames) >>> video_path ``` """ # Copied from diffusers.pipelines.animatediff.pipeline_animatediff.tensor2vid def tensor2vid(video: torch.Tensor, processor: "VaeImageProcessor", output_type: str = "np"): batch_size, channels, num_frames, height, width = video.shape outputs = [] for batch_idx in range(batch_size): batch_vid = video[batch_idx].permute(1, 0, 2, 3) batch_output = processor.postprocess(batch_vid, output_type) outputs.append(batch_output) if output_type == "np": outputs = np.stack(outputs) elif output_type == "pt": outputs = torch.stack(outputs) elif not output_type == "pil": raise ValueError(f"{output_type} does not exist. Please choose one of ['np', 'pt', 'pil']") return outputs class TextToVideoSDPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin): r""" Pipeline for text-to-video generation. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). The pipeline also inherits the following loading methods: - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). 
tokenizer (`CLIPTokenizer`): A [`~transformers.CLIPTokenizer`] to tokenize text. unet ([`UNet3DConditionModel`]): A [`UNet3DConditionModel`] to denoise the encoded video latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. """ model_cpu_offload_seq = "text_encoder->unet->vae" def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel, scheduler: KarrasDiffusionSchedulers, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, lora_scale: Optional[float] = None, **kwargs, ): deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt( prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs, ) # concatenate for backwards comp prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt def encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. 
lora_scale (`float`, *optional*): A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. """ # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, LoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True ) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the # representations. The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer. 
prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) return prompt_embeds, negative_prompt_embeds def decode_latents(self, latents): latents = 1 / self.vae.config.scaling_factor * latents batch_size, channels, num_frames, height, width = latents.shape latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width) image = self.vae.decode(latents).sample video = image[None, :].reshape((batch_size, num_frames, -1) + image.shape[2:]).permute(0, 2, 1, 3, 4) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 video = video.float() return video # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the 
scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs # Copied from diffusers.pipelines.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.StableDiffusionKDiffusionPipeline.check_inputs def check_inputs( self, prompt, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) def prepare_latents( self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None ): shape = ( batch_size, num_channels_latents, num_frames, height // self.vae_scale_factor, width // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, height: Optional[int] = None, width: Optional[int] = None, num_frames: int = 16, num_inference_steps: int = 50, guidance_scale: float = 9.0, negative_prompt: Optional[Union[str, List[str]]] = None, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "np", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, clip_skip: Optional[int] = None, ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated video. width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated video. num_frames (`int`, *optional*, defaults to 16): The number of video frames that are generated. Defaults to 16 frames which at 8 frames per seconds amounts to 2 seconds of video. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality videos at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. Latents should be of shape `(batch_size, num_channel, num_frames, height, width)`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). 
If not provided, text embeddings are generated from the `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"np"`): The output format of the generated video. Choose between `torch.FloatTensor` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. Examples: Returns: [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated frames. """ # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor num_images_per_prompt = 1 # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds ) # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 3. Encode input prompt text_encoder_lora_scale = ( cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None ) prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=clip_skip, ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. 
Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, num_frames, height, width, prompt_embeds.dtype, device, generator, latents, ) # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7. Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0] # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # reshape latents bsz, channel, frames, width, height = latents.shape latents = latents.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height) noise_pred = noise_pred.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # reshape latents back latents = latents[None, :].reshape(bsz, frames, channel, width, height).permute(0, 2, 1, 3, 4) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) # 8. Post processing if output_type == "latent": video = latents else: video_tensor = self.decode_latents(latents) video = tensor2vid(video_tensor, self.image_processor, output_type) # 9. Offload all models self.maybe_free_model_hooks() if not return_dict: return (video,) return TextToVideoSDPipelineOutput(frames=video)
diffusers/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py", "repo_id": "diffusers", "token_count": 14098 }
140
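The `__call__` method above follows the usual diffusers inference flow (encode prompt → prepare latents → denoising loop with classifier-free guidance → decode → `tensor2vid`). A minimal usage sketch is shown below; the checkpoint id and the exact indexing of `.frames` are assumptions and may differ between diffusers versions.

```py
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import export_to_video

# Model id is an assumption; any checkpoint compatible with TextToVideoSDPipeline should work.
pipe = DiffusionPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

prompt = "an astronaut riding a horse on mars"
# num_frames=16 at roughly 8 frames per second amounts to about two seconds of video.
result = pipe(prompt, num_frames=16, num_inference_steps=25)
video_frames = result.frames[0]  # frames for the first (and only) prompt in the batch
export_to_video(video_frames, "astronaut.mp4")
```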
# Copyright (c) 2023 Dominic Rampas MIT License # Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from typing import Dict, Union import torch import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import PeftAdapterMixin, UNet2DConditionLoadersMixin from ...models.attention_processor import ( ADDED_KV_ATTENTION_PROCESSORS, CROSS_ATTENTION_PROCESSORS, AttentionProcessor, AttnAddedKVProcessor, AttnProcessor, ) from ...models.modeling_utils import ModelMixin from ...utils import is_torch_version from .modeling_wuerstchen_common import AttnBlock, ResBlock, TimestepBlock, WuerstchenLayerNorm class WuerstchenPrior(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin, PeftAdapterMixin): unet_name = "prior" _supports_gradient_checkpointing = True @register_to_config def __init__(self, c_in=16, c=1280, c_cond=1024, c_r=64, depth=16, nhead=16, dropout=0.1): super().__init__() conv_cls = nn.Conv2d linear_cls = nn.Linear self.c_r = c_r self.projection = conv_cls(c_in, c, kernel_size=1) self.cond_mapper = nn.Sequential( linear_cls(c_cond, c), nn.LeakyReLU(0.2), linear_cls(c, c), ) self.blocks = nn.ModuleList() for _ in range(depth): self.blocks.append(ResBlock(c, dropout=dropout)) self.blocks.append(TimestepBlock(c, c_r)) self.blocks.append(AttnBlock(c, c, nhead, self_attn=True, dropout=dropout)) self.out = nn.Sequential( WuerstchenLayerNorm(c, elementwise_affine=False, eps=1e-6), conv_cls(c, c_in * 2, kernel_size=1), ) self.gradient_checkpointing = False self.set_default_attn_processor() @property # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. """ # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "get_processor"): processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True) for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. 
This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. """ if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnAddedKVProcessor() elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnProcessor() else: raise ValueError( f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" ) self.set_attn_processor(processor) def _set_gradient_checkpointing(self, module, value=False): self.gradient_checkpointing = value def gen_r_embedding(self, r, max_positions=10000): r = r * max_positions half_dim = self.c_r // 2 emb = math.log(max_positions) / (half_dim - 1) emb = torch.arange(half_dim, device=r.device).float().mul(-emb).exp() emb = r[:, None] * emb[None, :] emb = torch.cat([emb.sin(), emb.cos()], dim=1) if self.c_r % 2 == 1: # zero pad emb = nn.functional.pad(emb, (0, 1), mode="constant") return emb.to(dtype=r.dtype) def forward(self, x, r, c): x_in = x x = self.projection(x) c_embed = self.cond_mapper(c) r_embed = self.gen_r_embedding(r) if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward if is_torch_version(">=", "1.11.0"): for block in self.blocks: if isinstance(block, AttnBlock): x = torch.utils.checkpoint.checkpoint( create_custom_forward(block), x, c_embed, use_reentrant=False ) elif isinstance(block, TimestepBlock): x = torch.utils.checkpoint.checkpoint( create_custom_forward(block), x, r_embed, use_reentrant=False ) else: x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x, use_reentrant=False) else: for block in self.blocks: if isinstance(block, AttnBlock): x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x, c_embed) elif isinstance(block, TimestepBlock): x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x, r_embed) else: x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x) else: for block in self.blocks: if isinstance(block, AttnBlock): x = block(x, c_embed) elif isinstance(block, TimestepBlock): x = block(x, r_embed) else: x = block(x) a, b = self.out(x).chunk(2, dim=1) return (x_in - a) / ((1 - b).abs() + 1e-5)
diffusers/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py", "repo_id": "diffusers", "token_count": 3806 }
141
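`WuerstchenPrior.forward` projects the noised latents to width `c`, maps the text conditioning to the same width, embeds the timestep ratio `r` sinusoidally via `gen_r_embedding`, runs the ResBlock/TimestepBlock/AttnBlock stack, and returns `(x_in - a) / (|1 - b| + 1e-5)`. The sketch below is only a shape check with a deliberately tiny, non-default configuration (an assumption for illustration; released checkpoints use the defaults such as `c=1280`, `depth=16`).

```py
import torch
from diffusers.pipelines.wuerstchen.modeling_wuerstchen_prior import WuerstchenPrior

# Tiny config for a quick shape check only (assumed values, not a trained model).
prior = WuerstchenPrior(c_in=16, c=64, c_cond=32, c_r=64, depth=2, nhead=4, dropout=0.0)
prior.eval()

x = torch.randn(1, 16, 12, 12)   # noised latents: (batch, c_in, height, width)
r = torch.rand(1)                # timestep ratio in [0, 1], one value per batch element
c = torch.randn(1, 49, 32)       # conditioning: (batch, seq_len, c_cond)

with torch.no_grad():
    out = prior(x, r, c)

print(out.shape)  # torch.Size([1, 16, 12, 12]) -- same shape as the input latents
```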
# Copyright 2024 ParaDiGMS authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion # and https://github.com/hojonathanho/diffusion import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from ..utils.torch_utils import randn_tensor from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput class DDIMParallelSchedulerOutput(BaseOutput): """ Output class for the scheduler's `step` function output. Args: prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the denoising loop. pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): The predicted denoised sample `(x_{0})` based on the model output from the current timestep. `pred_original_sample` can be used to preview progress or for guidance. """ prev_sample: torch.FloatTensor pred_original_sample: Optional[torch.FloatTensor] = None # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar def betas_for_alpha_bar( num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ): """ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of (1-beta) over time from t = [0,1]. Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up to that part of the diffusion process. Args: num_diffusion_timesteps (`int`): the number of betas to produce. max_beta (`float`): the maximum beta to use; use values lower than 1 to prevent singularities. alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. 
Choose from `cosine` or `exp` Returns: betas (`np.ndarray`): the betas used by the scheduler to step the model outputs """ if alpha_transform_type == "cosine": def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) # Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr def rescale_zero_terminal_snr(betas): """ Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1) Args: betas (`torch.FloatTensor`): the betas that the scheduler is being initialized with. Returns: `torch.FloatTensor`: rescaled betas with zero terminal SNR """ # Convert betas to alphas_bar_sqrt alphas = 1.0 - betas alphas_cumprod = torch.cumprod(alphas, dim=0) alphas_bar_sqrt = alphas_cumprod.sqrt() # Store old values. alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() # Shift so the last timestep is zero. alphas_bar_sqrt -= alphas_bar_sqrt_T # Scale so the first timestep is back to the old value. alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) # Convert alphas_bar_sqrt to betas alphas_bar = alphas_bar_sqrt**2 # Revert sqrt alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod alphas = torch.cat([alphas_bar[0:1], alphas]) betas = 1 - alphas return betas class DDIMParallelScheduler(SchedulerMixin, ConfigMixin): """ Denoising diffusion implicit models is a scheduler that extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with non-Markovian guidance. [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and [`~SchedulerMixin.from_pretrained`] functions. For more details, see the original paper: https://arxiv.org/abs/2010.02502 Args: num_train_timesteps (`int`): number of diffusion steps used to train the model. beta_start (`float`): the starting `beta` value of inference. beta_end (`float`): the final `beta` value. beta_schedule (`str`): the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from `linear`, `scaled_linear`, or `squaredcos_cap_v2`. trained_betas (`np.ndarray`, optional): option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc. clip_sample (`bool`, default `True`): option to clip predicted sample for numerical stability. clip_sample_range (`float`, default `1.0`): the maximum magnitude for sample clipping. Valid only when `clip_sample=True`. set_alpha_to_one (`bool`, default `True`): each diffusion step uses the value of alphas product at that step and at the previous one. For the final step there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`, otherwise it uses the value of alpha at step 0. steps_offset (`int`, default `0`): An offset added to the inference steps, as required by some model families. 
prediction_type (`str`, default `epsilon`, optional): prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion process), `sample` (directly predicting the noisy sample`) or `v_prediction` (see section 2.4 https://imagen.research.google/video/paper.pdf) thresholding (`bool`, default `False`): whether to use the "dynamic thresholding" method (introduced by Imagen, https://arxiv.org/abs/2205.11487). Note that the thresholding method is unsuitable for latent-space diffusion models (such as stable-diffusion). dynamic_thresholding_ratio (`float`, default `0.995`): the ratio for the dynamic thresholding method. Default is `0.995`, the same as Imagen (https://arxiv.org/abs/2205.11487). Valid only when `thresholding=True`. sample_max_value (`float`, default `1.0`): the threshold value for dynamic thresholding. Valid only when `thresholding=True`. timestep_spacing (`str`, default `"leading"`): The way the timesteps should be scaled. Refer to Table 2. of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/abs/2305.08891) for more information. rescale_betas_zero_snr (`bool`, default `False`): whether to rescale the betas to have zero terminal SNR (proposed by https://arxiv.org/pdf/2305.08891.pdf). This can enable the model to generate very bright and dark samples instead of limiting it to samples with medium brightness. Loosely related to [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506). """ _compatibles = [e.name for e in KarrasDiffusionSchedulers] order = 1 _is_ode_scheduler = True @register_to_config # Copied from diffusers.schedulers.scheduling_ddim.DDIMScheduler.__init__ def __init__( self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[Union[np.ndarray, List[float]]] = None, clip_sample: bool = True, set_alpha_to_one: bool = True, steps_offset: int = 0, prediction_type: str = "epsilon", thresholding: bool = False, dynamic_thresholding_ratio: float = 0.995, clip_sample_range: float = 1.0, sample_max_value: float = 1.0, timestep_spacing: str = "leading", rescale_betas_zero_snr: bool = False, ): if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == "linear": self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule self.betas = betas_for_alpha_bar(num_train_timesteps) else: raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") # Rescale for zero SNR if rescale_betas_zero_snr: self.betas = rescale_zero_terminal_snr(self.betas) self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) # At every step in ddim, we are looking into the previous alphas_cumprod # For the final step, there is no previous alphas_cumprod because we are already at 0 # `set_alpha_to_one` decides whether we set this parameter simply to one or # whether we use the final alpha of the "non-previous" one. 
self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0] # standard deviation of the initial noise distribution self.init_noise_sigma = 1.0 # setable values self.num_inference_steps = None self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64)) # Copied from diffusers.schedulers.scheduling_ddim.DDIMScheduler.scale_model_input def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor: """ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the current timestep. Args: sample (`torch.FloatTensor`): The input sample. timestep (`int`, *optional*): The current timestep in the diffusion chain. Returns: `torch.FloatTensor`: A scaled input sample. """ return sample def _get_variance(self, timestep, prev_timestep=None): if prev_timestep is None: prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps alpha_prod_t = self.alphas_cumprod[timestep] alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev) return variance def _batch_get_variance(self, t, prev_t): alpha_prod_t = self.alphas_cumprod[t] alpha_prod_t_prev = self.alphas_cumprod[torch.clip(prev_t, min=0)] alpha_prod_t_prev[prev_t < 0] = torch.tensor(1.0) beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev) return variance # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor: """ "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing pixels from saturation at each step. We find that dynamic thresholding results in significantly better photorealism as well as better image-text alignment, especially when using very large guidance weights." 
https://arxiv.org/abs/2205.11487 """ dtype = sample.dtype batch_size, channels, *remaining_dims = sample.shape if dtype not in (torch.float32, torch.float64): sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half # Flatten sample for doing quantile calculation along each image sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) abs_sample = sample.abs() # "a certain percentile absolute pixel value" s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) s = torch.clamp( s, min=1, max=self.config.sample_max_value ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" sample = sample.reshape(batch_size, channels, *remaining_dims) sample = sample.to(dtype) return sample # Copied from diffusers.schedulers.scheduling_ddim.DDIMScheduler.set_timesteps def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): """ Sets the discrete timesteps used for the diffusion chain (to be run before inference). Args: num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. """ if num_inference_steps > self.config.num_train_timesteps: raise ValueError( f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:" f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle" f" maximal {self.config.num_train_timesteps} timesteps." ) self.num_inference_steps = num_inference_steps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": timesteps = ( np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps) .round()[::-1] .copy() .astype(np.int64) ) elif self.config.timestep_spacing == "leading": step_ratio = self.config.num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": step_ratio = self.config.num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 timesteps = np.round(np.arange(self.config.num_train_timesteps, 0, -step_ratio)).astype(np.int64) timesteps -= 1 else: raise ValueError( f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'leading' or 'trailing'." ) self.timesteps = torch.from_numpy(timesteps).to(device) def step( self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, eta: float = 0.0, use_clipped_model_output: bool = False, generator=None, variance_noise: Optional[torch.FloatTensor] = None, return_dict: bool = True, ) -> Union[DDIMParallelSchedulerOutput, Tuple]: """ Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion process from the learned model outputs (most often the predicted noise). Args: model_output (`torch.FloatTensor`): direct output from learned diffusion model. 
timestep (`int`): current discrete timestep in the diffusion chain. sample (`torch.FloatTensor`): current instance of sample being created by diffusion process. eta (`float`): weight of noise for added noise in diffusion step. use_clipped_model_output (`bool`): if `True`, compute "corrected" `model_output` from the clipped predicted original sample. Necessary because predicted original sample is clipped to [-1, 1] when `self.config.clip_sample` is `True`. If no clipping has happened, "corrected" `model_output` would coincide with the one provided as input and `use_clipped_model_output` will have not effect. generator: random number generator. variance_noise (`torch.FloatTensor`): instead of generating noise for the variance using `generator`, we can directly provide the noise for the variance itself. This is useful for methods such as CycleDiffusion. (https://arxiv.org/abs/2210.05559) return_dict (`bool`): option for returning tuple rather than DDIMParallelSchedulerOutput class Returns: [`~schedulers.scheduling_utils.DDIMParallelSchedulerOutput`] or `tuple`: [`~schedulers.scheduling_utils.DDIMParallelSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. """ if self.num_inference_steps is None: raise ValueError( "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" ) # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf # Ideally, read DDIM paper in-detail understanding # Notation (<variable name> -> <name in paper> # - pred_noise_t -> e_theta(x_t, t) # - pred_original_sample -> f_theta(x_t, t) or x_0 # - std_dev_t -> sigma_t # - eta -> η # - pred_sample_direction -> "direction pointing to x_t" # - pred_prev_sample -> "x_t-1" # 1. get previous step value (=t-1) prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas alpha_prod_t = self.alphas_cumprod[timestep] alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod beta_prod_t = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf if self.config.prediction_type == "epsilon": pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) pred_epsilon = model_output elif self.config.prediction_type == "sample": pred_original_sample = model_output pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5) elif self.config.prediction_type == "v_prediction": pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample else: raise ValueError( f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" " `v_prediction`" ) # 4. Clip or threshold "predicted x_0" if self.config.thresholding: pred_original_sample = self._threshold_sample(pred_original_sample) elif self.config.clip_sample: pred_original_sample = pred_original_sample.clamp( -self.config.clip_sample_range, self.config.clip_sample_range ) # 5. 
compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) variance = self._get_variance(timestep, prev_timestep) std_dev_t = eta * variance ** (0.5) if use_clipped_model_output: # the pred_epsilon is always re-derived from the clipped x_0 in Glide pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5) # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * pred_epsilon # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction if eta > 0: if variance_noise is not None and generator is not None: raise ValueError( "Cannot pass both generator and variance_noise. Please make sure that either `generator` or" " `variance_noise` stays `None`." ) if variance_noise is None: variance_noise = randn_tensor( model_output.shape, generator=generator, device=model_output.device, dtype=model_output.dtype ) variance = std_dev_t * variance_noise prev_sample = prev_sample + variance if not return_dict: return (prev_sample,) return DDIMParallelSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample) def batch_step_no_noise( self, model_output: torch.FloatTensor, timesteps: List[int], sample: torch.FloatTensor, eta: float = 0.0, use_clipped_model_output: bool = False, ) -> torch.FloatTensor: """ Batched version of the `step` function, to be able to reverse the SDE for multiple samples/timesteps at once. Also, does not add any noise to the predicted sample, which is necessary for parallel sampling where the noise is pre-sampled by the pipeline. Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion process from the learned model outputs (most often the predicted noise). Args: model_output (`torch.FloatTensor`): direct output from learned diffusion model. timesteps (`List[int]`): current discrete timesteps in the diffusion chain. This is now a list of integers. sample (`torch.FloatTensor`): current instance of sample being created by diffusion process. eta (`float`): weight of noise for added noise in diffusion step. use_clipped_model_output (`bool`): if `True`, compute "corrected" `model_output` from the clipped predicted original sample. Necessary because predicted original sample is clipped to [-1, 1] when `self.config.clip_sample` is `True`. If no clipping has happened, "corrected" `model_output` would coincide with the one provided as input and `use_clipped_model_output` will have not effect. Returns: `torch.FloatTensor`: sample tensor at previous timestep. """ if self.num_inference_steps is None: raise ValueError( "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" ) assert eta == 0.0 # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf # Ideally, read DDIM paper in-detail understanding # Notation (<variable name> -> <name in paper> # - pred_noise_t -> e_theta(x_t, t) # - pred_original_sample -> f_theta(x_t, t) or x_0 # - std_dev_t -> sigma_t # - eta -> η # - pred_sample_direction -> "direction pointing to x_t" # - pred_prev_sample -> "x_t-1" # 1. 
get previous step value (=t-1) t = timesteps prev_t = t - self.config.num_train_timesteps // self.num_inference_steps t = t.view(-1, *([1] * (model_output.ndim - 1))) prev_t = prev_t.view(-1, *([1] * (model_output.ndim - 1))) # 1. compute alphas, betas self.alphas_cumprod = self.alphas_cumprod.to(model_output.device) self.final_alpha_cumprod = self.final_alpha_cumprod.to(model_output.device) alpha_prod_t = self.alphas_cumprod[t] alpha_prod_t_prev = self.alphas_cumprod[torch.clip(prev_t, min=0)] alpha_prod_t_prev[prev_t < 0] = torch.tensor(1.0) beta_prod_t = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf if self.config.prediction_type == "epsilon": pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) pred_epsilon = model_output elif self.config.prediction_type == "sample": pred_original_sample = model_output pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5) elif self.config.prediction_type == "v_prediction": pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample else: raise ValueError( f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" " `v_prediction`" ) # 4. Clip or threshold "predicted x_0" if self.config.thresholding: pred_original_sample = self._threshold_sample(pred_original_sample) elif self.config.clip_sample: pred_original_sample = pred_original_sample.clamp( -self.config.clip_sample_range, self.config.clip_sample_range ) # 5. compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) variance = self._batch_get_variance(t, prev_t).to(model_output.device).view(*alpha_prod_t_prev.shape) std_dev_t = eta * variance ** (0.5) if use_clipped_model_output: # the pred_epsilon is always re-derived from the clipped x_0 in Glide pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5) # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * pred_epsilon # 7. 
compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction return prev_sample # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise def add_noise( self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor, ) -> torch.FloatTensor: # Make sure alphas_cumprod and timestep have same device and dtype as original_samples # Move the self.alphas_cumprod to device to avoid redundant CPU to GPU data movement # for the subsequent add_noise calls self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device) alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype) timesteps = timesteps.to(original_samples.device) sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 sqrt_alpha_prod = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape) < len(original_samples.shape): sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity def get_velocity( self, sample: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor ) -> torch.FloatTensor: # Make sure alphas_cumprod and timestep have same device and dtype as sample self.alphas_cumprod = self.alphas_cumprod.to(device=sample.device) alphas_cumprod = self.alphas_cumprod.to(dtype=sample.dtype) timesteps = timesteps.to(sample.device) sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 sqrt_alpha_prod = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape) < len(sample.shape): sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape): sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample return velocity def __len__(self): return self.config.num_train_timesteps
diffusers/src/diffusers/schedulers/scheduling_ddim_parallel.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/scheduling_ddim_parallel.py", "repo_id": "diffusers", "token_count": 13291 }
142
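`DDIMParallelScheduler.step` implements the standard DDIM update (formulas (12) and (16) of the DDIM paper), while `batch_step_no_noise` is the batched, noise-free variant used for parallel (ParaDiGMS) sampling. A minimal standalone denoising loop, with a random tensor standing in for the UNet's noise prediction, might look like this:

```py
import torch
from diffusers import DDIMParallelScheduler

scheduler = DDIMParallelScheduler()  # default config: 1000 training steps, linear betas
scheduler.set_timesteps(num_inference_steps=50)

sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    # In a real pipeline this would be the UNet's epsilon prediction for (sample, t).
    model_output = torch.randn_like(sample)
    sample = scheduler.step(model_output, t, sample, eta=0.0).prev_sample
```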
# Copyright 2024 Katherine Crowson, The HuggingFace Team and hlky. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar def betas_for_alpha_bar( num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ): """ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of (1-beta) over time from t = [0,1]. Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up to that part of the diffusion process. Args: num_diffusion_timesteps (`int`): the number of betas to produce. max_beta (`float`): the maximum beta to use; use values lower than 1 to prevent singularities. alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. Choose from `cosine` or `exp` Returns: betas (`np.ndarray`): the betas used by the scheduler to step the model outputs """ if alpha_transform_type == "cosine": def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin): """ Scheduler with Heun steps for discrete beta schedules. This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic methods the library implements for all schedulers such as loading and saving. Args: num_train_timesteps (`int`, defaults to 1000): The number of diffusion steps to train the model. beta_start (`float`, defaults to 0.0001): The starting `beta` value of inference. beta_end (`float`, defaults to 0.02): The final `beta` value. beta_schedule (`str`, defaults to `"linear"`): The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from `linear` or `scaled_linear`. trained_betas (`np.ndarray`, *optional*): Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. prediction_type (`str`, defaults to `epsilon`, *optional*): Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen Video](https://imagen.research.google/video/paper.pdf) paper). 
clip_sample (`bool`, defaults to `True`): Clip the predicted sample for numerical stability. clip_sample_range (`float`, defaults to 1.0): The maximum magnitude for sample clipping. Valid only when `clip_sample=True`. use_karras_sigmas (`bool`, *optional*, defaults to `False`): Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`, the sigmas are determined according to a sequence of noise levels {σi}. timestep_spacing (`str`, defaults to `"linspace"`): The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. steps_offset (`int`, defaults to 0): An offset added to the inference steps, as required by some model families. """ _compatibles = [e.name for e in KarrasDiffusionSchedulers] order = 2 @register_to_config def __init__( self, num_train_timesteps: int = 1000, beta_start: float = 0.00085, # sensible defaults beta_end: float = 0.012, beta_schedule: str = "linear", trained_betas: Optional[Union[np.ndarray, List[float]]] = None, prediction_type: str = "epsilon", use_karras_sigmas: Optional[bool] = False, clip_sample: Optional[bool] = False, clip_sample_range: float = 1.0, timestep_spacing: str = "linspace", steps_offset: int = 0, ): if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == "linear": self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine") elif beta_schedule == "exp": self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp") else: raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) # set all values self.set_timesteps(num_train_timesteps, None, num_train_timesteps) self.use_karras_sigmas = use_karras_sigmas self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.index_for_timestep def index_for_timestep(self, timestep, schedule_timesteps=None): if schedule_timesteps is None: schedule_timesteps = self.timesteps indices = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. for image-to-image) pos = 1 if len(indices) > 1 else 0 return indices[pos].item() @property def init_noise_sigma(self): # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 @property def step_index(self): """ The index counter for current timestep. It will increase 1 after each scheduler step. """ return self._step_index @property def begin_index(self): """ The index for the first timestep. 
It should be set from pipeline with `set_begin_index` method. """ return self._begin_index # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index def set_begin_index(self, begin_index: int = 0): """ Sets the begin index for the scheduler. This function should be run from pipeline before the inference. Args: begin_index (`int`): The begin index for the scheduler. """ self._begin_index = begin_index def scale_model_input( self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor], ) -> torch.FloatTensor: """ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the current timestep. Args: sample (`torch.FloatTensor`): The input sample. timestep (`int`, *optional*): The current timestep in the diffusion chain. Returns: `torch.FloatTensor`: A scaled input sample. """ if self.step_index is None: self._init_step_index(timestep) sigma = self.sigmas[self.step_index] sample = sample / ((sigma**2 + 1) ** 0.5) return sample def set_timesteps( self, num_inference_steps: int, device: Union[str, torch.device] = None, num_train_timesteps: Optional[int] = None, ): """ Sets the discrete timesteps used for the diffusion chain (to be run before inference). Args: num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. """ self.num_inference_steps = num_inference_steps num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy() elif self.config.timestep_spacing == "leading": step_ratio = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": step_ratio = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32) timesteps -= 1 else: raise ValueError( f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." 
) sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) log_sigmas = np.log(sigmas) sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) if self.config.use_karras_sigmas: sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps) timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]) sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32) sigmas = torch.from_numpy(sigmas).to(device=device) self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]]) timesteps = torch.from_numpy(timesteps) timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)]) self.timesteps = timesteps.to(device=device) # empty dt and derivative self.prev_derivative = None self.dt = None self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t def _sigma_to_t(self, sigma, log_sigmas): # get log sigma log_sigma = np.log(np.maximum(sigma, 1e-10)) # get distribution dists = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) high_idx = low_idx + 1 low = log_sigmas[low_idx] high = log_sigmas[high_idx] # interpolate sigmas w = (low - log_sigma) / (low - high) w = np.clip(w, 0, 1) # transform interpolation to time range t = (1 - w) * low_idx + w * high_idx t = t.reshape(sigma.shape) return t # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor: """Constructs the noise schedule of Karras et al. (2022).""" # Hack to make sure that other schedulers which copy this function don't break # TODO: Add this logic to the other schedulers if hasattr(self.config, "sigma_min"): sigma_min = self.config.sigma_min else: sigma_min = None if hasattr(self.config, "sigma_max"): sigma_max = self.config.sigma_max else: sigma_max = None sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() rho = 7.0 # 7.0 is the value used in the paper ramp = np.linspace(0, 1, num_inference_steps) min_inv_rho = sigma_min ** (1 / rho) max_inv_rho = sigma_max ** (1 / rho) sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas @property def state_in_first_order(self): return self.dt is None # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index def _init_step_index(self, timestep): if self.begin_index is None: if isinstance(timestep, torch.Tensor): timestep = timestep.to(self.timesteps.device) self._step_index = self.index_for_timestep(timestep) else: self._step_index = self._begin_index def step( self, model_output: Union[torch.FloatTensor, np.ndarray], timestep: Union[float, torch.FloatTensor], sample: Union[torch.FloatTensor, np.ndarray], return_dict: bool = True, ) -> Union[SchedulerOutput, Tuple]: """ Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion process from the learned model outputs (most often the predicted noise). Args: model_output (`torch.FloatTensor`): The direct output from learned diffusion model. timestep (`float`): The current discrete timestep in the diffusion chain. 
sample (`torch.FloatTensor`): A current instance of a sample created by the diffusion process. return_dict (`bool`): Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or tuple. Returns: [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a tuple is returned where the first element is the sample tensor. """ if self.step_index is None: self._init_step_index(timestep) if self.state_in_first_order: sigma = self.sigmas[self.step_index] sigma_next = self.sigmas[self.step_index + 1] else: # 2nd order / Heun's method sigma = self.sigmas[self.step_index - 1] sigma_next = self.sigmas[self.step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API gamma = 0 sigma_hat = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": sigma_input = sigma_hat if self.state_in_first_order else sigma_next pred_original_sample = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": sigma_input = sigma_hat if self.state_in_first_order else sigma_next pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": pred_original_sample = model_output else: raise ValueError( f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" ) if self.config.clip_sample: pred_original_sample = pred_original_sample.clamp( -self.config.clip_sample_range, self.config.clip_sample_range ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order derivative = (sample - pred_original_sample) / sigma_hat # 3. delta timestep dt = sigma_next - sigma_hat # store for 2nd order step self.prev_derivative = derivative self.dt = dt self.sample = sample else: # 2. 2nd order / Heun's method derivative = (sample - pred_original_sample) / sigma_next derivative = (self.prev_derivative + derivative) / 2 # 3. 
take prev timestep & sample dt = self.dt sample = self.sample # free dt and derivative # Note, this puts the scheduler in "first order mode" self.prev_derivative = None self.dt = None self.sample = None prev_sample = sample + derivative * dt # upon completion increase step index by one self._step_index += 1 if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=prev_sample) # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.add_noise def add_noise( self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.FloatTensor, ) -> torch.FloatTensor: # Make sure sigmas and timesteps have the same device and dtype as original_samples sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): # mps does not support float64 schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) timesteps = timesteps.to(original_samples.device, dtype=torch.float32) else: schedule_timesteps = self.timesteps.to(original_samples.device) timesteps = timesteps.to(original_samples.device) # self.begin_index is None when scheduler is used for training, or pipeline does not implement set_begin_index if self.begin_index is None: step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] elif self.step_index is not None: # add_noise is called after first denoising step (for inpainting) step_indices = [self.step_index] * timesteps.shape[0] else: # add noise is called before first denoising step to create initial latent(img2img) step_indices = [self.begin_index] * timesteps.shape[0] sigma = sigmas[step_indices].flatten() while len(sigma.shape) < len(original_samples.shape): sigma = sigma.unsqueeze(-1) noisy_samples = original_samples + noise * sigma return noisy_samples def __len__(self): return self.config.num_train_timesteps
diffusers/src/diffusers/schedulers/scheduling_heun_discrete.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/scheduling_heun_discrete.py", "repo_id": "diffusers", "token_count": 9031 }
143
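`HeunDiscreteScheduler` is a second-order sampler: `set_timesteps` repeats every timestep after the first, so the model is evaluated twice per effective step and `step` alternates between the first-order branch and the Heun corrector branch via `state_in_first_order`. A standalone sketch with a dummy model output:

```py
import torch
from diffusers import HeunDiscreteScheduler

scheduler = HeunDiscreteScheduler()  # default config; pass use_karras_sigmas=True for Karras step sizes
scheduler.set_timesteps(num_inference_steps=10)

sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
for t in scheduler.timesteps:  # 1 + 2 * 9 = 19 entries for 10 inference steps
    model_input = scheduler.scale_model_input(sample, t)
    model_output = torch.randn_like(model_input)  # stand-in for a UNet epsilon prediction
    sample = scheduler.step(model_output, t, sample).prev_sample
```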
# Copyright 2024 TSAIL Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # DISCLAIMER: check https://arxiv.org/abs/2302.04867 and https://github.com/wl-zhao/UniPC for more info # The codebase is modified based on https://github.com/huggingface/diffusers/blob/main/src/diffusers/schedulers/scheduling_dpmsolver_multistep.py import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import deprecate from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar def betas_for_alpha_bar( num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ): """ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of (1-beta) over time from t = [0,1]. Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up to that part of the diffusion process. Args: num_diffusion_timesteps (`int`): the number of betas to produce. max_beta (`float`): the maximum beta to use; use values lower than 1 to prevent singularities. alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. Choose from `cosine` or `exp` Returns: betas (`np.ndarray`): the betas used by the scheduler to step the model outputs """ if alpha_transform_type == "cosine": def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) class UniPCMultistepScheduler(SchedulerMixin, ConfigMixin): """ `UniPCMultistepScheduler` is a training-free framework designed for the fast sampling of diffusion models. This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic methods the library implements for all schedulers such as loading and saving. Args: num_train_timesteps (`int`, defaults to 1000): The number of diffusion steps to train the model. beta_start (`float`, defaults to 0.0001): The starting `beta` value of inference. beta_end (`float`, defaults to 0.02): The final `beta` value. beta_schedule (`str`, defaults to `"linear"`): The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from `linear`, `scaled_linear`, or `squaredcos_cap_v2`. trained_betas (`np.ndarray`, *optional*): Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. 
solver_order (`int`, default `2`): The UniPC order which can be any positive integer. The effective order of accuracy is `solver_order + 1` due to the UniC. It is recommended to use `solver_order=2` for guided sampling, and `solver_order=3` for unconditional sampling. prediction_type (`str`, defaults to `epsilon`, *optional*): Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen Video](https://imagen.research.google/video/paper.pdf) paper). thresholding (`bool`, defaults to `False`): Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such as Stable Diffusion. dynamic_thresholding_ratio (`float`, defaults to 0.995): The ratio for the dynamic thresholding method. Valid only when `thresholding=True`. sample_max_value (`float`, defaults to 1.0): The threshold value for dynamic thresholding. Valid only when `thresholding=True` and `predict_x0=True`. predict_x0 (`bool`, defaults to `True`): Whether to use the updating algorithm on the predicted x0. solver_type (`str`, default `bh2`): Solver type for UniPC. It is recommended to use `bh1` for unconditional sampling when steps < 10, and `bh2` otherwise. lower_order_final (`bool`, default `True`): Whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. This can stabilize the sampling of DPMSolver for steps < 15, especially for steps <= 10. disable_corrector (`list`, default `[]`): Decides which step to disable the corrector to mitigate the misalignment between `epsilon_theta(x_t, c)` and `epsilon_theta(x_t^c, c)` which can influence convergence for a large guidance scale. Corrector is usually disabled during the first few steps. solver_p (`SchedulerMixin`, default `None`): Any other scheduler that if specified, the algorithm becomes `solver_p + UniC`. use_karras_sigmas (`bool`, *optional*, defaults to `False`): Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`, the sigmas are determined according to a sequence of noise levels {σi}. timestep_spacing (`str`, defaults to `"linspace"`): The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. steps_offset (`int`, defaults to 0): An offset added to the inference steps, as required by some model families. 
""" _compatibles = [e.name for e in KarrasDiffusionSchedulers] order = 1 @register_to_config def __init__( self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[Union[np.ndarray, List[float]]] = None, solver_order: int = 2, prediction_type: str = "epsilon", thresholding: bool = False, dynamic_thresholding_ratio: float = 0.995, sample_max_value: float = 1.0, predict_x0: bool = True, solver_type: str = "bh2", lower_order_final: bool = True, disable_corrector: List[int] = [], solver_p: SchedulerMixin = None, use_karras_sigmas: Optional[bool] = False, timestep_spacing: str = "linspace", steps_offset: int = 0, ): if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == "linear": self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule self.betas = betas_for_alpha_bar(num_train_timesteps) else: raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) # Currently we only support VP-type noise schedule self.alpha_t = torch.sqrt(self.alphas_cumprod) self.sigma_t = torch.sqrt(1 - self.alphas_cumprod) self.lambda_t = torch.log(self.alpha_t) - torch.log(self.sigma_t) self.sigmas = ((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 # standard deviation of the initial noise distribution self.init_noise_sigma = 1.0 if solver_type not in ["bh1", "bh2"]: if solver_type in ["midpoint", "heun", "logrho"]: self.register_to_config(solver_type="bh2") else: raise NotImplementedError(f"{solver_type} does is not implemented for {self.__class__}") self.predict_x0 = predict_x0 # setable values self.num_inference_steps = None timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=np.float32)[::-1].copy() self.timesteps = torch.from_numpy(timesteps) self.model_outputs = [None] * solver_order self.timestep_list = [None] * solver_order self.lower_order_nums = 0 self.disable_corrector = disable_corrector self.solver_p = solver_p self.last_sample = None self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication @property def step_index(self): """ The index counter for current timestep. It will increase 1 after each scheduler step. """ return self._step_index @property def begin_index(self): """ The index for the first timestep. It should be set from pipeline with `set_begin_index` method. """ return self._begin_index # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index def set_begin_index(self, begin_index: int = 0): """ Sets the begin index for the scheduler. This function should be run from pipeline before the inference. Args: begin_index (`int`): The begin index for the scheduler. """ self._begin_index = begin_index def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): """ Sets the discrete timesteps used for the diffusion chain (to be run before inference). 
Args: num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. """ # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": timesteps = ( np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps + 1) .round()[::-1][:-1] .copy() .astype(np.int64) ) elif self.config.timestep_spacing == "leading": step_ratio = self.config.num_train_timesteps // (num_inference_steps + 1) # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 timesteps = (np.arange(0, num_inference_steps + 1) * step_ratio).round()[::-1][:-1].copy().astype(np.int64) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": step_ratio = self.config.num_train_timesteps / num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 timesteps = np.arange(self.config.num_train_timesteps, 0, -step_ratio).round().copy().astype(np.int64) timesteps -= 1 else: raise ValueError( f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." ) sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) if self.config.use_karras_sigmas: log_sigmas = np.log(sigmas) sigmas = np.flip(sigmas).copy() sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps) timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() sigmas = np.concatenate([sigmas, sigmas[-1:]]).astype(np.float32) else: sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) sigma_last = ((1 - self.alphas_cumprod[0]) / self.alphas_cumprod[0]) ** 0.5 sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32) self.sigmas = torch.from_numpy(sigmas) self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.int64) self.num_inference_steps = len(timesteps) self.model_outputs = [ None, ] * self.config.solver_order self.lower_order_nums = 0 self.last_sample = None if self.solver_p: self.solver_p.set_timesteps(self.num_inference_steps, device=device) # add an index counter for schedulers that allow duplicated timesteps self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor: """ "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing pixels from saturation at each step. We find that dynamic thresholding results in significantly better photorealism as well as better image-text alignment, especially when using very large guidance weights." 
https://arxiv.org/abs/2205.11487 """ dtype = sample.dtype batch_size, channels, *remaining_dims = sample.shape if dtype not in (torch.float32, torch.float64): sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half # Flatten sample for doing quantile calculation along each image sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) abs_sample = sample.abs() # "a certain percentile absolute pixel value" s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) s = torch.clamp( s, min=1, max=self.config.sample_max_value ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" sample = sample.reshape(batch_size, channels, *remaining_dims) sample = sample.to(dtype) return sample # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t def _sigma_to_t(self, sigma, log_sigmas): # get log sigma log_sigma = np.log(np.maximum(sigma, 1e-10)) # get distribution dists = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) high_idx = low_idx + 1 low = log_sigmas[low_idx] high = log_sigmas[high_idx] # interpolate sigmas w = (low - log_sigma) / (low - high) w = np.clip(w, 0, 1) # transform interpolation to time range t = (1 - w) * low_idx + w * high_idx t = t.reshape(sigma.shape) return t # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._sigma_to_alpha_sigma_t def _sigma_to_alpha_sigma_t(self, sigma): alpha_t = 1 / ((sigma**2 + 1) ** 0.5) sigma_t = sigma * alpha_t return alpha_t, sigma_t # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor: """Constructs the noise schedule of Karras et al. (2022).""" # Hack to make sure that other schedulers which copy this function don't break # TODO: Add this logic to the other schedulers if hasattr(self.config, "sigma_min"): sigma_min = self.config.sigma_min else: sigma_min = None if hasattr(self.config, "sigma_max"): sigma_max = self.config.sigma_max else: sigma_max = None sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() rho = 7.0 # 7.0 is the value used in the paper ramp = np.linspace(0, 1, num_inference_steps) min_inv_rho = sigma_min ** (1 / rho) max_inv_rho = sigma_max ** (1 / rho) sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas def convert_model_output( self, model_output: torch.FloatTensor, *args, sample: torch.FloatTensor = None, **kwargs, ) -> torch.FloatTensor: r""" Convert the model output to the corresponding type the UniPC algorithm needs. Args: model_output (`torch.FloatTensor`): The direct output from the learned diffusion model. timestep (`int`): The current discrete timestep in the diffusion chain. sample (`torch.FloatTensor`): A current instance of a sample created by the diffusion process. Returns: `torch.FloatTensor`: The converted model output. 
""" timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None) if sample is None: if len(args) > 1: sample = args[1] else: raise ValueError("missing `sample` as a required keyward argument") if timestep is not None: deprecate( "timesteps", "1.0.0", "Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", ) sigma = self.sigmas[self.step_index] alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) if self.predict_x0: if self.config.prediction_type == "epsilon": x0_pred = (sample - sigma_t * model_output) / alpha_t elif self.config.prediction_type == "sample": x0_pred = model_output elif self.config.prediction_type == "v_prediction": x0_pred = alpha_t * sample - sigma_t * model_output else: raise ValueError( f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" " `v_prediction` for the UniPCMultistepScheduler." ) if self.config.thresholding: x0_pred = self._threshold_sample(x0_pred) return x0_pred else: if self.config.prediction_type == "epsilon": return model_output elif self.config.prediction_type == "sample": epsilon = (sample - alpha_t * model_output) / sigma_t return epsilon elif self.config.prediction_type == "v_prediction": epsilon = alpha_t * model_output + sigma_t * sample return epsilon else: raise ValueError( f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" " `v_prediction` for the UniPCMultistepScheduler." ) def multistep_uni_p_bh_update( self, model_output: torch.FloatTensor, *args, sample: torch.FloatTensor = None, order: int = None, **kwargs, ) -> torch.FloatTensor: """ One step for the UniP (B(h) version). Alternatively, `self.solver_p` is used if is specified. Args: model_output (`torch.FloatTensor`): The direct output from the learned diffusion model at the current timestep. prev_timestep (`int`): The previous discrete timestep in the diffusion chain. sample (`torch.FloatTensor`): A current instance of a sample created by the diffusion process. order (`int`): The order of UniP at this timestep (corresponds to the *p* in UniPC-p). Returns: `torch.FloatTensor`: The sample tensor at the previous timestep. 
""" prev_timestep = args[0] if len(args) > 0 else kwargs.pop("prev_timestep", None) if sample is None: if len(args) > 1: sample = args[1] else: raise ValueError(" missing `sample` as a required keyward argument") if order is None: if len(args) > 2: order = args[2] else: raise ValueError(" missing `order` as a required keyward argument") if prev_timestep is not None: deprecate( "prev_timestep", "1.0.0", "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", ) model_output_list = self.model_outputs s0 = self.timestep_list[-1] m0 = model_output_list[-1] x = sample if self.solver_p: x_t = self.solver_p.step(model_output, s0, x).prev_sample return x_t sigma_t, sigma_s0 = self.sigmas[self.step_index + 1], self.sigmas[self.step_index] alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) h = lambda_t - lambda_s0 device = sample.device rks = [] D1s = [] for i in range(1, order): si = self.step_index - i mi = model_output_list[-(i + 1)] alpha_si, sigma_si = self._sigma_to_alpha_sigma_t(self.sigmas[si]) lambda_si = torch.log(alpha_si) - torch.log(sigma_si) rk = (lambda_si - lambda_s0) / h rks.append(rk) D1s.append((mi - m0) / rk) rks.append(1.0) rks = torch.tensor(rks, device=device) R = [] b = [] hh = -h if self.predict_x0 else h h_phi_1 = torch.expm1(hh) # h\phi_1(h) = e^h - 1 h_phi_k = h_phi_1 / hh - 1 factorial_i = 1 if self.config.solver_type == "bh1": B_h = hh elif self.config.solver_type == "bh2": B_h = torch.expm1(hh) else: raise NotImplementedError() for i in range(1, order + 1): R.append(torch.pow(rks, i - 1)) b.append(h_phi_k * factorial_i / B_h) factorial_i *= i + 1 h_phi_k = h_phi_k / hh - 1 / factorial_i R = torch.stack(R) b = torch.tensor(b, device=device) if len(D1s) > 0: D1s = torch.stack(D1s, dim=1) # (B, K) # for order 2, we use a simplified version if order == 2: rhos_p = torch.tensor([0.5], dtype=x.dtype, device=device) else: rhos_p = torch.linalg.solve(R[:-1, :-1], b[:-1]) else: D1s = None if self.predict_x0: x_t_ = sigma_t / sigma_s0 * x - alpha_t * h_phi_1 * m0 if D1s is not None: pred_res = torch.einsum("k,bkc...->bc...", rhos_p, D1s) else: pred_res = 0 x_t = x_t_ - alpha_t * B_h * pred_res else: x_t_ = alpha_t / alpha_s0 * x - sigma_t * h_phi_1 * m0 if D1s is not None: pred_res = torch.einsum("k,bkc...->bc...", rhos_p, D1s) else: pred_res = 0 x_t = x_t_ - sigma_t * B_h * pred_res x_t = x_t.to(x.dtype) return x_t def multistep_uni_c_bh_update( self, this_model_output: torch.FloatTensor, *args, last_sample: torch.FloatTensor = None, this_sample: torch.FloatTensor = None, order: int = None, **kwargs, ) -> torch.FloatTensor: """ One step for the UniC (B(h) version). Args: this_model_output (`torch.FloatTensor`): The model outputs at `x_t`. this_timestep (`int`): The current timestep `t`. last_sample (`torch.FloatTensor`): The generated sample before the last predictor `x_{t-1}`. this_sample (`torch.FloatTensor`): The generated sample after the last predictor `x_{t}`. order (`int`): The `p` of UniC-p at this step. The effective order of accuracy should be `order + 1`. Returns: `torch.FloatTensor`: The corrected sample tensor at the current timestep. 
""" this_timestep = args[0] if len(args) > 0 else kwargs.pop("this_timestep", None) if last_sample is None: if len(args) > 1: last_sample = args[1] else: raise ValueError(" missing`last_sample` as a required keyward argument") if this_sample is None: if len(args) > 2: this_sample = args[2] else: raise ValueError(" missing`this_sample` as a required keyward argument") if order is None: if len(args) > 3: order = args[3] else: raise ValueError(" missing`order` as a required keyward argument") if this_timestep is not None: deprecate( "this_timestep", "1.0.0", "Passing `this_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", ) model_output_list = self.model_outputs m0 = model_output_list[-1] x = last_sample x_t = this_sample model_t = this_model_output sigma_t, sigma_s0 = self.sigmas[self.step_index], self.sigmas[self.step_index - 1] alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) h = lambda_t - lambda_s0 device = this_sample.device rks = [] D1s = [] for i in range(1, order): si = self.step_index - (i + 1) mi = model_output_list[-(i + 1)] alpha_si, sigma_si = self._sigma_to_alpha_sigma_t(self.sigmas[si]) lambda_si = torch.log(alpha_si) - torch.log(sigma_si) rk = (lambda_si - lambda_s0) / h rks.append(rk) D1s.append((mi - m0) / rk) rks.append(1.0) rks = torch.tensor(rks, device=device) R = [] b = [] hh = -h if self.predict_x0 else h h_phi_1 = torch.expm1(hh) # h\phi_1(h) = e^h - 1 h_phi_k = h_phi_1 / hh - 1 factorial_i = 1 if self.config.solver_type == "bh1": B_h = hh elif self.config.solver_type == "bh2": B_h = torch.expm1(hh) else: raise NotImplementedError() for i in range(1, order + 1): R.append(torch.pow(rks, i - 1)) b.append(h_phi_k * factorial_i / B_h) factorial_i *= i + 1 h_phi_k = h_phi_k / hh - 1 / factorial_i R = torch.stack(R) b = torch.tensor(b, device=device) if len(D1s) > 0: D1s = torch.stack(D1s, dim=1) else: D1s = None # for order 1, we use a simplified version if order == 1: rhos_c = torch.tensor([0.5], dtype=x.dtype, device=device) else: rhos_c = torch.linalg.solve(R, b) if self.predict_x0: x_t_ = sigma_t / sigma_s0 * x - alpha_t * h_phi_1 * m0 if D1s is not None: corr_res = torch.einsum("k,bkc...->bc...", rhos_c[:-1], D1s) else: corr_res = 0 D1_t = model_t - m0 x_t = x_t_ - alpha_t * B_h * (corr_res + rhos_c[-1] * D1_t) else: x_t_ = alpha_t / alpha_s0 * x - sigma_t * h_phi_1 * m0 if D1s is not None: corr_res = torch.einsum("k,bkc...->bc...", rhos_c[:-1], D1s) else: corr_res = 0 D1_t = model_t - m0 x_t = x_t_ - sigma_t * B_h * (corr_res + rhos_c[-1] * D1_t) x_t = x_t.to(x.dtype) return x_t # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.index_for_timestep def index_for_timestep(self, timestep, schedule_timesteps=None): if schedule_timesteps is None: schedule_timesteps = self.timesteps index_candidates = (schedule_timesteps == timestep).nonzero() if len(index_candidates) == 0: step_index = len(self.timesteps) - 1 # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) elif len(index_candidates) > 1: step_index = index_candidates[1].item() else: step_index = index_candidates[0].item() return step_index # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._init_step_index def _init_step_index(self, timestep): """ Initialize the step_index counter for the scheduler. """ if self.begin_index is None: if isinstance(timestep, torch.Tensor): timestep = timestep.to(self.timesteps.device) self._step_index = self.index_for_timestep(timestep) else: self._step_index = self._begin_index def step( self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, return_dict: bool = True, ) -> Union[SchedulerOutput, Tuple]: """ Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with the multistep UniPC. Args: model_output (`torch.FloatTensor`): The direct output from learned diffusion model. timestep (`int`): The current discrete timestep in the diffusion chain. sample (`torch.FloatTensor`): A current instance of a sample created by the diffusion process. return_dict (`bool`): Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`. Returns: [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a tuple is returned where the first element is the sample tensor. """ if self.num_inference_steps is None: raise ValueError( "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" ) if self.step_index is None: self._init_step_index(timestep) use_corrector = ( self.step_index > 0 and self.step_index - 1 not in self.disable_corrector and self.last_sample is not None ) model_output_convert = self.convert_model_output(model_output, sample=sample) if use_corrector: sample = self.multistep_uni_c_bh_update( this_model_output=model_output_convert, last_sample=self.last_sample, this_sample=sample, order=self.this_order, ) for i in range(self.config.solver_order - 1): self.model_outputs[i] = self.model_outputs[i + 1] self.timestep_list[i] = self.timestep_list[i + 1] self.model_outputs[-1] = model_output_convert self.timestep_list[-1] = timestep if self.config.lower_order_final: this_order = min(self.config.solver_order, len(self.timesteps) - self.step_index) else: this_order = self.config.solver_order self.this_order = min(this_order, self.lower_order_nums + 1) # warmup for multistep assert self.this_order > 0 self.last_sample = sample prev_sample = self.multistep_uni_p_bh_update( model_output=model_output, # pass the original non-converted model output, in case solver-p is used sample=sample, order=self.this_order, ) if self.lower_order_nums < self.config.solver_order: self.lower_order_nums += 1 # upon completion increase step index by one self._step_index += 1 if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=prev_sample) def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor: """ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the current timestep. Args: sample (`torch.FloatTensor`): The input sample. Returns: `torch.FloatTensor`: A scaled input sample. 
""" return sample # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.add_noise def add_noise( self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor, ) -> torch.FloatTensor: # Make sure sigmas and timesteps have the same device and dtype as original_samples sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): # mps does not support float64 schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) timesteps = timesteps.to(original_samples.device, dtype=torch.float32) else: schedule_timesteps = self.timesteps.to(original_samples.device) timesteps = timesteps.to(original_samples.device) # begin_index is None when the scheduler is used for training or pipeline does not implement set_begin_index if self.begin_index is None: step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] elif self.step_index is not None: # add_noise is called after first denoising step (for inpainting) step_indices = [self.step_index] * timesteps.shape[0] else: # add noise is called before first denoising step to create initial latent(img2img) step_indices = [self.begin_index] * timesteps.shape[0] sigma = sigmas[step_indices].flatten() while len(sigma.shape) < len(original_samples.shape): sigma = sigma.unsqueeze(-1) alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) noisy_samples = alpha_t * original_samples + sigma_t * noise return noisy_samples def __len__(self): return self.config.num_train_timesteps
diffusers/src/diffusers/schedulers/scheduling_unipc_multistep.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/scheduling_unipc_multistep.py", "repo_id": "diffusers", "token_count": 17388 }
144
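A minimal usage sketch for the scheduler defined above (not part of the source file): swap `UniPCMultistepScheduler` into an existing pipeline via `from_config` so the noise-schedule settings carry over. The model id, step count, and config overrides are illustrative assumptions.

```py
import torch

from diffusers import DiffusionPipeline, UniPCMultistepScheduler

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Reuse the pipeline's scheduler config so the beta schedule / train timesteps match,
# overriding only the UniPC-specific options described in the docstring above.
pipe.scheduler = UniPCMultistepScheduler.from_config(
    pipe.scheduler.config, solver_order=2, use_karras_sigmas=True
)

image = pipe(
    "a photo of an astronaut riding a horse on mars", num_inference_steps=20
).images[0]
```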
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class LMSDiscreteScheduler(metaclass=DummyObject): _backends = ["torch", "scipy"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "scipy"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "scipy"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "scipy"])
diffusers/src/diffusers/utils/dummy_torch_and_scipy_objects.py/0
{ "file_path": "diffusers/src/diffusers/utils/dummy_torch_and_scipy_objects.py", "repo_id": "diffusers", "token_count": 220 }
145
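The autogenerated dummy object above exists so that `from diffusers import LMSDiscreteScheduler` keeps working when `scipy` is not installed, deferring the failure until the class is actually used. A small illustrative sketch of the same mechanism, using a hypothetical stand-in class name:

```py
from diffusers.utils import DummyObject, requires_backends


class FakeScipyBackedScheduler(metaclass=DummyObject):
    # Hypothetical class, mirroring the autogenerated placeholder above.
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])


try:
    FakeScipyBackedScheduler()
except ImportError as err:
    # Raised only when torch or scipy is missing; the message names the backends to install.
    print(err)
```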
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ State dict utilities: utility methods for converting state dicts easily """ import enum from .logging import get_logger logger = get_logger(__name__) class StateDictType(enum.Enum): """ The mode to use when converting state dicts. """ DIFFUSERS_OLD = "diffusers_old" KOHYA_SS = "kohya_ss" PEFT = "peft" DIFFUSERS = "diffusers" # We need to define a proper mapping for Unet since it uses different output keys than text encoder # e.g. to_q_lora -> q_proj / to_q UNET_TO_DIFFUSERS = { ".to_out_lora.up": ".to_out.0.lora_B", ".to_out_lora.down": ".to_out.0.lora_A", ".to_q_lora.down": ".to_q.lora_A", ".to_q_lora.up": ".to_q.lora_B", ".to_k_lora.down": ".to_k.lora_A", ".to_k_lora.up": ".to_k.lora_B", ".to_v_lora.down": ".to_v.lora_A", ".to_v_lora.up": ".to_v.lora_B", ".lora.up": ".lora_B", ".lora.down": ".lora_A", } DIFFUSERS_TO_PEFT = { ".q_proj.lora_linear_layer.up": ".q_proj.lora_B", ".q_proj.lora_linear_layer.down": ".q_proj.lora_A", ".k_proj.lora_linear_layer.up": ".k_proj.lora_B", ".k_proj.lora_linear_layer.down": ".k_proj.lora_A", ".v_proj.lora_linear_layer.up": ".v_proj.lora_B", ".v_proj.lora_linear_layer.down": ".v_proj.lora_A", ".out_proj.lora_linear_layer.up": ".out_proj.lora_B", ".out_proj.lora_linear_layer.down": ".out_proj.lora_A", ".lora_linear_layer.up": ".lora_B", ".lora_linear_layer.down": ".lora_A", } DIFFUSERS_OLD_TO_PEFT = { ".to_q_lora.up": ".q_proj.lora_B", ".to_q_lora.down": ".q_proj.lora_A", ".to_k_lora.up": ".k_proj.lora_B", ".to_k_lora.down": ".k_proj.lora_A", ".to_v_lora.up": ".v_proj.lora_B", ".to_v_lora.down": ".v_proj.lora_A", ".to_out_lora.up": ".out_proj.lora_B", ".to_out_lora.down": ".out_proj.lora_A", ".lora_linear_layer.up": ".lora_B", ".lora_linear_layer.down": ".lora_A", } PEFT_TO_DIFFUSERS = { ".q_proj.lora_B": ".q_proj.lora_linear_layer.up", ".q_proj.lora_A": ".q_proj.lora_linear_layer.down", ".k_proj.lora_B": ".k_proj.lora_linear_layer.up", ".k_proj.lora_A": ".k_proj.lora_linear_layer.down", ".v_proj.lora_B": ".v_proj.lora_linear_layer.up", ".v_proj.lora_A": ".v_proj.lora_linear_layer.down", ".out_proj.lora_B": ".out_proj.lora_linear_layer.up", ".out_proj.lora_A": ".out_proj.lora_linear_layer.down", "to_k.lora_A": "to_k.lora.down", "to_k.lora_B": "to_k.lora.up", "to_q.lora_A": "to_q.lora.down", "to_q.lora_B": "to_q.lora.up", "to_v.lora_A": "to_v.lora.down", "to_v.lora_B": "to_v.lora.up", "to_out.0.lora_A": "to_out.0.lora.down", "to_out.0.lora_B": "to_out.0.lora.up", } DIFFUSERS_OLD_TO_DIFFUSERS = { ".to_q_lora.up": ".q_proj.lora_linear_layer.up", ".to_q_lora.down": ".q_proj.lora_linear_layer.down", ".to_k_lora.up": ".k_proj.lora_linear_layer.up", ".to_k_lora.down": ".k_proj.lora_linear_layer.down", ".to_v_lora.up": ".v_proj.lora_linear_layer.up", ".to_v_lora.down": ".v_proj.lora_linear_layer.down", ".to_out_lora.up": ".out_proj.lora_linear_layer.up", ".to_out_lora.down": ".out_proj.lora_linear_layer.down", } PEFT_TO_KOHYA_SS = { "lora_A": "lora_down", "lora_B": 
"lora_up", # This is not a comprehensive dict as kohya format requires replacing `.` with `_` in keys, # adding prefixes and adding alpha values # Check `convert_state_dict_to_kohya` for more } PEFT_STATE_DICT_MAPPINGS = { StateDictType.DIFFUSERS_OLD: DIFFUSERS_OLD_TO_PEFT, StateDictType.DIFFUSERS: DIFFUSERS_TO_PEFT, } DIFFUSERS_STATE_DICT_MAPPINGS = { StateDictType.DIFFUSERS_OLD: DIFFUSERS_OLD_TO_DIFFUSERS, StateDictType.PEFT: PEFT_TO_DIFFUSERS, } KOHYA_STATE_DICT_MAPPINGS = {StateDictType.PEFT: PEFT_TO_KOHYA_SS} KEYS_TO_ALWAYS_REPLACE = { ".processor.": ".", } def convert_state_dict(state_dict, mapping): r""" Simply iterates over the state dict and replaces the patterns in `mapping` with the corresponding values. Args: state_dict (`dict[str, torch.Tensor]`): The state dict to convert. mapping (`dict[str, str]`): The mapping to use for conversion, the mapping should be a dictionary with the following structure: - key: the pattern to replace - value: the pattern to replace with Returns: converted_state_dict (`dict`) The converted state dict. """ converted_state_dict = {} for k, v in state_dict.items(): # First, filter out the keys that we always want to replace for pattern in KEYS_TO_ALWAYS_REPLACE.keys(): if pattern in k: new_pattern = KEYS_TO_ALWAYS_REPLACE[pattern] k = k.replace(pattern, new_pattern) for pattern in mapping.keys(): if pattern in k: new_pattern = mapping[pattern] k = k.replace(pattern, new_pattern) break converted_state_dict[k] = v return converted_state_dict def convert_state_dict_to_peft(state_dict, original_type=None, **kwargs): r""" Converts a state dict to the PEFT format The state dict can be from previous diffusers format (`OLD_DIFFUSERS`), or new diffusers format (`DIFFUSERS`). The method only supports the conversion from diffusers old/new to PEFT for now. Args: state_dict (`dict[str, torch.Tensor]`): The state dict to convert. original_type (`StateDictType`, *optional*): The original type of the state dict, if not provided, the method will try to infer it automatically. """ if original_type is None: # Old diffusers to PEFT if any("to_out_lora" in k for k in state_dict.keys()): original_type = StateDictType.DIFFUSERS_OLD elif any("lora_linear_layer" in k for k in state_dict.keys()): original_type = StateDictType.DIFFUSERS else: raise ValueError("Could not automatically infer state dict type") if original_type not in PEFT_STATE_DICT_MAPPINGS.keys(): raise ValueError(f"Original type {original_type} is not supported") mapping = PEFT_STATE_DICT_MAPPINGS[original_type] return convert_state_dict(state_dict, mapping) def convert_state_dict_to_diffusers(state_dict, original_type=None, **kwargs): r""" Converts a state dict to new diffusers format. The state dict can be from previous diffusers format (`OLD_DIFFUSERS`), or PEFT format (`PEFT`) or new diffusers format (`DIFFUSERS`). In the last case the method will return the state dict as is. The method only supports the conversion from diffusers old, PEFT to diffusers new for now. Args: state_dict (`dict[str, torch.Tensor]`): The state dict to convert. original_type (`StateDictType`, *optional*): The original type of the state dict, if not provided, the method will try to infer it automatically. kwargs (`dict`, *args*): Additional arguments to pass to the method. - **adapter_name**: For example, in case of PEFT, some keys will be pre-pended with the adapter name, therefore needs a special handling. 
By default PEFT also takes care of that in `get_peft_model_state_dict` method: https://github.com/huggingface/peft/blob/ba0477f2985b1ba311b83459d29895c809404e99/src/peft/utils/save_and_load.py#L92 but we add it here in case we don't want to rely on that method. """ peft_adapter_name = kwargs.pop("adapter_name", None) if peft_adapter_name is not None: peft_adapter_name = "." + peft_adapter_name else: peft_adapter_name = "" if original_type is None: # Old diffusers to PEFT if any("to_out_lora" in k for k in state_dict.keys()): original_type = StateDictType.DIFFUSERS_OLD elif any(f".lora_A{peft_adapter_name}.weight" in k for k in state_dict.keys()): original_type = StateDictType.PEFT elif any("lora_linear_layer" in k for k in state_dict.keys()): # nothing to do return state_dict else: raise ValueError("Could not automatically infer state dict type") if original_type not in DIFFUSERS_STATE_DICT_MAPPINGS.keys(): raise ValueError(f"Original type {original_type} is not supported") mapping = DIFFUSERS_STATE_DICT_MAPPINGS[original_type] return convert_state_dict(state_dict, mapping) def convert_unet_state_dict_to_peft(state_dict): r""" Converts a state dict from UNet format to diffusers format - i.e. by removing some keys """ mapping = UNET_TO_DIFFUSERS return convert_state_dict(state_dict, mapping) def convert_all_state_dict_to_peft(state_dict): r""" Attempts to first `convert_state_dict_to_peft`, and if it doesn't detect `lora_linear_layer` for a valid `DIFFUSERS` LoRA for example, attempts to exclusively convert the Unet `convert_unet_state_dict_to_peft` """ try: peft_dict = convert_state_dict_to_peft(state_dict) except Exception as e: if str(e) == "Could not automatically infer state dict type": peft_dict = convert_unet_state_dict_to_peft(state_dict) else: raise if not any("lora_A" in key or "lora_B" in key for key in peft_dict.keys()): raise ValueError("Your LoRA was not converted to PEFT") return peft_dict def convert_state_dict_to_kohya(state_dict, original_type=None, **kwargs): r""" Converts a `PEFT` state dict to `Kohya` format that can be used in AUTOMATIC1111, ComfyUI, SD.Next, InvokeAI, etc. The method only supports the conversion from PEFT to Kohya for now. Args: state_dict (`dict[str, torch.Tensor]`): The state dict to convert. original_type (`StateDictType`, *optional*): The original type of the state dict, if not provided, the method will try to infer it automatically. kwargs (`dict`, *args*): Additional arguments to pass to the method. - **adapter_name**: For example, in case of PEFT, some keys will be pre-pended with the adapter name, therefore needs a special handling. By default PEFT also takes care of that in `get_peft_model_state_dict` method: https://github.com/huggingface/peft/blob/ba0477f2985b1ba311b83459d29895c809404e99/src/peft/utils/save_and_load.py#L92 but we add it here in case we don't want to rely on that method. """ try: import torch except ImportError: logger.error("Converting PEFT state dicts to Kohya requires torch to be installed.") raise peft_adapter_name = kwargs.pop("adapter_name", None) if peft_adapter_name is not None: peft_adapter_name = "." 
+ peft_adapter_name else: peft_adapter_name = "" if original_type is None: if any(f".lora_A{peft_adapter_name}.weight" in k for k in state_dict.keys()): original_type = StateDictType.PEFT if original_type not in KOHYA_STATE_DICT_MAPPINGS.keys(): raise ValueError(f"Original type {original_type} is not supported") # Use the convert_state_dict function with the appropriate mapping kohya_ss_partial_state_dict = convert_state_dict(state_dict, KOHYA_STATE_DICT_MAPPINGS[StateDictType.PEFT]) kohya_ss_state_dict = {} # Additional logic for replacing header, alpha parameters `.` with `_` in all keys for kohya_key, weight in kohya_ss_partial_state_dict.items(): if "text_encoder_2." in kohya_key: kohya_key = kohya_key.replace("text_encoder_2.", "lora_te2.") elif "text_encoder." in kohya_key: kohya_key = kohya_key.replace("text_encoder.", "lora_te1.") elif "unet" in kohya_key: kohya_key = kohya_key.replace("unet", "lora_unet") kohya_key = kohya_key.replace(".", "_", kohya_key.count(".") - 2) kohya_key = kohya_key.replace(peft_adapter_name, "") # Kohya doesn't take names kohya_ss_state_dict[kohya_key] = weight if "lora_down" in kohya_key: alpha_key = f'{kohya_key.split(".")[0]}.alpha' kohya_ss_state_dict[alpha_key] = torch.tensor(len(weight)) return kohya_ss_state_dict
diffusers/src/diffusers/utils/state_dict_utils.py/0
{ "file_path": "diffusers/src/diffusers/utils/state_dict_utils.py", "repo_id": "diffusers", "token_count": 5702 }
146
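A minimal sketch of the conversion utilities above, using toy tensors and hypothetical key names: a PEFT-style LoRA state dict is detected automatically (via the `.lora_A.weight` suffix) and rewritten to the new diffusers naming.

```py
import torch

from diffusers.utils.state_dict_utils import convert_state_dict_to_diffusers

# Toy example; real LoRA state dicts contain many more keys and real weights.
peft_state_dict = {
    "text_encoder.q_proj.lora_A.weight": torch.zeros(4, 8),
    "text_encoder.q_proj.lora_B.weight": torch.zeros(8, 4),
}

diffusers_state_dict = convert_state_dict_to_diffusers(peft_state_dict)
print(list(diffusers_state_dict.keys()))
# Expected, per PEFT_TO_DIFFUSERS above:
# ['text_encoder.q_proj.lora_linear_layer.down.weight',
#  'text_encoder.q_proj.lora_linear_layer.up.weight']
```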
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from diffusers import VQModel from diffusers.utils.testing_utils import ( backend_manual_seed, enable_full_determinism, floats_tensor, torch_device, ) from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): model_class = VQModel main_input_name = "sample" @property def dummy_input(self, sizes=(32, 32)): batch_size = 4 num_channels = 3 image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) return {"sample": image} @property def input_shape(self): return (3, 32, 32) @property def output_shape(self): return (3, 32, 32) def prepare_init_args_and_inputs_for_common(self): init_dict = { "block_out_channels": [32, 64], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 3, } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_forward_signature(self): pass def test_training(self): pass def test_from_pretrained_hub(self): model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True) self.assertIsNotNone(model) self.assertEqual(len(loading_info["missing_keys"]), 0) model.to(torch_device) image = model(**self.dummy_input) assert image is not None, "Make sure output is not None" def test_output_pretrained(self): model = VQModel.from_pretrained("fusing/vqgan-dummy") model.to(torch_device).eval() torch.manual_seed(0) backend_manual_seed(torch_device, 0) image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) image = image.to(torch_device) with torch.no_grad(): output = model(image).sample output_slice = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143]) # fmt: on self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
diffusers/tests/models/autoencoders/test_models_vq.py/0
{ "file_path": "diffusers/tests/models/autoencoders/test_models_vq.py", "repo_id": "diffusers", "token_count": 1280 }
147
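The fast tests above build a tiny randomly initialized `VQModel`; the same init dict can be used standalone to sanity-check shapes. A sketch, runnable on CPU, mirroring `prepare_init_args_and_inputs_for_common`:

```py
import torch

from diffusers import VQModel

model = VQModel(
    block_out_channels=[32, 64],
    in_channels=3,
    out_channels=3,
    down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
    up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
    latent_channels=3,
)
model.eval()

# Same shape as dummy_input: (batch, channels, height, width)
sample = torch.randn(4, 3, 32, 32)
with torch.no_grad():
    reconstruction = model(sample).sample

print(reconstruction.shape)  # torch.Size([4, 3, 32, 32])
```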
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import torch from diffusers import StableCascadeUNet from diffusers.utils import logging from diffusers.utils.testing_utils import ( enable_full_determinism, numpy_cosine_similarity_distance, require_torch_gpu, slow, ) from diffusers.utils.torch_utils import randn_tensor logger = logging.get_logger(__name__) enable_full_determinism() @slow class StableCascadeUNetModelSlowTests(unittest.TestCase): def tearDown(self) -> None: super().tearDown() gc.collect() torch.cuda.empty_cache() def test_stable_cascade_unet_prior_single_file_components(self): single_file_url = "https://huggingface.co/stabilityai/stable-cascade/blob/main/stage_c_bf16.safetensors" single_file_unet = StableCascadeUNet.from_single_file(single_file_url) single_file_unet_config = single_file_unet.config del single_file_unet gc.collect() torch.cuda.empty_cache() unet = StableCascadeUNet.from_pretrained("stabilityai/stable-cascade-prior", subfolder="prior", variant="bf16") unet_config = unet.config del unet gc.collect() torch.cuda.empty_cache() PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values"] for param_name, param_value in single_file_unet_config.items(): if param_name in PARAMS_TO_IGNORE: continue assert unet_config[param_name] == param_value def test_stable_cascade_unet_decoder_single_file_components(self): single_file_url = "https://huggingface.co/stabilityai/stable-cascade/blob/main/stage_b_bf16.safetensors" single_file_unet = StableCascadeUNet.from_single_file(single_file_url) single_file_unet_config = single_file_unet.config del single_file_unet gc.collect() torch.cuda.empty_cache() unet = StableCascadeUNet.from_pretrained("stabilityai/stable-cascade", subfolder="decoder", variant="bf16") unet_config = unet.config del unet gc.collect() torch.cuda.empty_cache() PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values"] for param_name, param_value in single_file_unet_config.items(): if param_name in PARAMS_TO_IGNORE: continue assert unet_config[param_name] == param_value def test_stable_cascade_unet_config_loading(self): config = StableCascadeUNet.load_config( pretrained_model_name_or_path="diffusers/stable-cascade-configs", subfolder="prior" ) single_file_url = "https://huggingface.co/stabilityai/stable-cascade/blob/main/stage_c_bf16.safetensors" single_file_unet = StableCascadeUNet.from_single_file(single_file_url, config=config) single_file_unet_config = single_file_unet.config del single_file_unet gc.collect() torch.cuda.empty_cache() PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values"] for param_name, param_value in config.items(): if param_name in PARAMS_TO_IGNORE: continue assert single_file_unet_config[param_name] == param_value @require_torch_gpu def test_stable_cascade_unet_single_file_prior_forward_pass(self): dtype = torch.bfloat16 generator = torch.Generator("cpu") model_inputs = { "sample": randn_tensor((1, 16, 24, 24), 
generator=generator.manual_seed(0)).to("cuda", dtype), "timestep_ratio": torch.tensor([1]).to("cuda", dtype), "clip_text_pooled": randn_tensor((1, 1, 1280), generator=generator.manual_seed(0)).to("cuda", dtype), "clip_text": randn_tensor((1, 77, 1280), generator=generator.manual_seed(0)).to("cuda", dtype), "clip_img": randn_tensor((1, 1, 768), generator=generator.manual_seed(0)).to("cuda", dtype), "pixels": randn_tensor((1, 3, 8, 8), generator=generator.manual_seed(0)).to("cuda", dtype), } unet = StableCascadeUNet.from_pretrained( "stabilityai/stable-cascade-prior", subfolder="prior", revision="refs/pr/2", variant="bf16", torch_dtype=dtype, ) unet.to("cuda") with torch.no_grad(): prior_output = unet(**model_inputs).sample.float().cpu().numpy() # Remove UNet from GPU memory before loading the single file UNet model del unet gc.collect() torch.cuda.empty_cache() single_file_url = "https://huggingface.co/stabilityai/stable-cascade/blob/main/stage_c_bf16.safetensors" single_file_unet = StableCascadeUNet.from_single_file(single_file_url, torch_dtype=dtype) single_file_unet.to("cuda") with torch.no_grad(): prior_single_file_output = single_file_unet(**model_inputs).sample.float().cpu().numpy() # Remove UNet from GPU memory before loading the single file UNet model del single_file_unet gc.collect() torch.cuda.empty_cache() max_diff = numpy_cosine_similarity_distance(prior_output.flatten(), prior_single_file_output.flatten()) assert max_diff < 8e-3 @require_torch_gpu def test_stable_cascade_unet_single_file_decoder_forward_pass(self): dtype = torch.float32 generator = torch.Generator("cpu") model_inputs = { "sample": randn_tensor((1, 4, 256, 256), generator=generator.manual_seed(0)).to("cuda", dtype), "timestep_ratio": torch.tensor([1]).to("cuda", dtype), "clip_text": randn_tensor((1, 77, 1280), generator=generator.manual_seed(0)).to("cuda", dtype), "clip_text_pooled": randn_tensor((1, 1, 1280), generator=generator.manual_seed(0)).to("cuda", dtype), "pixels": randn_tensor((1, 3, 8, 8), generator=generator.manual_seed(0)).to("cuda", dtype), } unet = StableCascadeUNet.from_pretrained( "stabilityai/stable-cascade", subfolder="decoder", revision="refs/pr/44", torch_dtype=dtype, ) unet.to("cuda") with torch.no_grad(): prior_output = unet(**model_inputs).sample.float().cpu().numpy() # Remove UNet from GPU memory before loading the single file UNet model del unet gc.collect() torch.cuda.empty_cache() single_file_url = "https://huggingface.co/stabilityai/stable-cascade/blob/main/stage_b.safetensors" single_file_unet = StableCascadeUNet.from_single_file(single_file_url, torch_dtype=dtype) single_file_unet.to("cuda") with torch.no_grad(): prior_single_file_output = single_file_unet(**model_inputs).sample.float().cpu().numpy() # Remove UNet from GPU memory before loading the single file UNet model del single_file_unet gc.collect() torch.cuda.empty_cache() max_diff = numpy_cosine_similarity_distance(prior_output.flatten(), prior_single_file_output.flatten()) assert max_diff < 1e-4
diffusers/tests/models/unets/test_models_unet_stable_cascade.py/0
{ "file_path": "diffusers/tests/models/unets/test_models_unet_stable_cascade.py", "repo_id": "diffusers", "token_count": 3387 }
148
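Outside the test harness, the config-parity checks above boil down to comparing the `config` of a single-file load against the multi-folder Hub repo while ignoring bookkeeping keys. A hedged sketch (downloads several GB of weights; the URL and repo id are the ones used in the test):

```py
from diffusers import StableCascadeUNet

single_file_url = "https://huggingface.co/stabilityai/stable-cascade/blob/main/stage_c_bf16.safetensors"
single_file_unet = StableCascadeUNet.from_single_file(single_file_url)

hub_unet = StableCascadeUNet.from_pretrained(
    "stabilityai/stable-cascade-prior", subfolder="prior", variant="bf16"
)

# Same bookkeeping keys the test ignores.
ignore = {"torch_dtype", "_name_or_path", "_use_default_values"}
mismatches = {
    key: (value, hub_unet.config[key])
    for key, value in single_file_unet.config.items()
    if key not in ignore and hub_unet.config[key] != value
}
print(mismatches or "configs match")
```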
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import AmusedImg2ImgPipeline, AmusedScheduler, UVit2DModel, VQModel from diffusers.utils import load_image from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class AmusedImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = AmusedImg2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "latents"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS required_optional_params = PipelineTesterMixin.required_optional_params - { "latents", } def get_dummy_components(self): torch.manual_seed(0) transformer = UVit2DModel( hidden_size=32, use_bias=False, hidden_dropout=0.0, cond_embed_dim=32, micro_cond_encode_dim=2, micro_cond_embed_dim=10, encoder_hidden_size=32, vocab_size=32, codebook_size=32, in_channels=32, block_out_channels=32, num_res_blocks=1, downsample=True, upsample=True, block_num_heads=1, num_hidden_layers=1, num_attention_heads=1, attention_dropout=0.0, intermediate_size=32, layer_norm_eps=1e-06, ln_elementwise_affine=True, ) scheduler = AmusedScheduler(mask_token_id=31) torch.manual_seed(0) vqvae = VQModel( act_fn="silu", block_out_channels=[32], down_block_types=[ "DownEncoderBlock2D", ], in_channels=3, latent_channels=32, layers_per_block=2, norm_num_groups=32, num_vq_embeddings=32, out_channels=3, sample_size=32, up_block_types=[ "UpDecoderBlock2D", ], mid_block_add_attention=False, lookup_from_codebook=True, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=64, layer_norm_eps=1e-05, num_attention_heads=8, num_hidden_layers=3, pad_token_id=1, vocab_size=1000, projection_dim=32, ) text_encoder = CLIPTextModelWithProjection(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "transformer": transformer, "scheduler": scheduler, "vqvae": vqvae, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) image = torch.full((1, 3, 4, 4), 1.0, dtype=torch.float32, device=device) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "output_type": "np", "image": image, } return inputs def test_inference_batch_consistent(self, batch_sizes=[2]): self._test_inference_batch_consistent(batch_sizes=batch_sizes, batch_generator=False) @unittest.skip("aMUSEd does not support lists of 
generators") def test_inference_batch_single_identical(self): ... @slow @require_torch_gpu class AmusedImg2ImgPipelineSlowTests(unittest.TestCase): def test_amused_256(self): pipe = AmusedImg2ImgPipeline.from_pretrained("amused/amused-256") pipe.to(torch_device) image = ( load_image("https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains.jpg") .resize((256, 256)) .convert("RGB") ) image = pipe( "winter mountains", image, generator=torch.Generator().manual_seed(0), num_inference_steps=2, output_type="np", ).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 256, 256, 3) expected_slice = np.array([0.9993, 1.0, 0.9996, 1.0, 0.9995, 0.9925, 0.9990, 0.9954, 1.0]) assert np.abs(image_slice - expected_slice).max() < 1e-2 def test_amused_256_fp16(self): pipe = AmusedImg2ImgPipeline.from_pretrained("amused/amused-256", torch_dtype=torch.float16, variant="fp16") pipe.to(torch_device) image = ( load_image("https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains.jpg") .resize((256, 256)) .convert("RGB") ) image = pipe( "winter mountains", image, generator=torch.Generator().manual_seed(0), num_inference_steps=2, output_type="np", ).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 256, 256, 3) expected_slice = np.array([0.9980, 0.9980, 0.9940, 0.9944, 0.9960, 0.9908, 1.0, 1.0, 0.9986]) assert np.abs(image_slice - expected_slice).max() < 1e-2 def test_amused_512(self): pipe = AmusedImg2ImgPipeline.from_pretrained("amused/amused-512") pipe.to(torch_device) image = ( load_image("https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains.jpg") .resize((512, 512)) .convert("RGB") ) image = pipe( "winter mountains", image, generator=torch.Generator().manual_seed(0), num_inference_steps=2, output_type="np", ).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.1344, 0.0985, 0.0, 0.1194, 0.1809, 0.0765, 0.0854, 0.1371, 0.0933]) assert np.abs(image_slice - expected_slice).max() < 0.1 def test_amused_512_fp16(self): pipe = AmusedImg2ImgPipeline.from_pretrained("amused/amused-512", variant="fp16", torch_dtype=torch.float16) pipe.to(torch_device) image = ( load_image("https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains.jpg") .resize((512, 512)) .convert("RGB") ) image = pipe( "winter mountains", image, generator=torch.Generator().manual_seed(0), num_inference_steps=2, output_type="np", ).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.1536, 0.1767, 0.0227, 0.1079, 0.2400, 0.1427, 0.1511, 0.1564, 0.1542]) assert np.abs(image_slice - expected_slice).max() < 0.1
diffusers/tests/pipelines/amused/test_amused_img2img.py/0
{ "file_path": "diffusers/tests/pipelines/amused/test_amused_img2img.py", "repo_id": "diffusers", "token_count": 3982 }
149
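The slow tests above double as a usage recipe for `AmusedImg2ImgPipeline`. A condensed, hedged sketch (GPU assumed; the image URL and checkpoint are the ones from the test, while the step count here is an illustrative choice rather than the test's 2-step setting):

```py
import torch

from diffusers import AmusedImg2ImgPipeline
from diffusers.utils import load_image

pipe = AmusedImg2ImgPipeline.from_pretrained(
    "amused/amused-256", torch_dtype=torch.float16, variant="fp16"
).to("cuda")

init_image = (
    load_image(
        "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains.jpg"
    )
    .resize((256, 256))
    .convert("RGB")
)

image = pipe(
    "winter mountains",
    init_image,
    generator=torch.Generator().manual_seed(0),
    num_inference_steps=12,
).images[0]
image.save("winter_mountains.png")
```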
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImg2ImgPipeline, UNet2DConditionModel, ) from diffusers.pipelines.controlnet.pipeline_controlnet import MultiControlNetModel from diffusers.utils import load_image from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_numpy, numpy_cosine_similarity_distance, require_torch_gpu, slow, torch_device, ) from diffusers.utils.torch_utils import randn_tensor from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( IPAdapterTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class ControlNetImg2ImgPipelineFastTests( IPAdapterTesterMixin, PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase, ): pipeline_class = StableDiffusionControlNetImg2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"}) image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, norm_num_groups=1, ) torch.manual_seed(0) controlnet = ControlNetModel( block_out_channels=(4, 8), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), norm_num_groups=1, ) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[4, 8], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = 
CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 control_image = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ) image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", "image": image, "control_image": control_image, } return inputs def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) class StableDiffusionMultiControlNetPipelineFastTests( IPAdapterTesterMixin, PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionControlNetImg2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = frozenset([]) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, norm_num_groups=1, ) torch.manual_seed(0) def init_weights(m): if isinstance(m, torch.nn.Conv2d): torch.nn.init.normal_(m.weight) m.bias.data.fill_(1.0) controlnet1 = ControlNetModel( block_out_channels=(4, 8), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), norm_num_groups=1, ) controlnet1.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) controlnet2 = ControlNetModel( block_out_channels=(4, 8), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), norm_num_groups=1, ) controlnet2.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[4, 8], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, 
norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") controlnet = MultiControlNetModel([controlnet1, controlnet2]) components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 control_image = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), ] image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", "image": image, "control_image": control_image, } return inputs def test_control_guidance_switch(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) scale = 10.0 steps = 4 inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_1 = pipe(**inputs)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0] # make sure that all outputs are different assert np.sum(np.abs(output_1 - output_2)) > 1e-3 assert np.sum(np.abs(output_1 - output_3)) > 1e-3 assert np.sum(np.abs(output_1 - output_4)) > 1e-3 def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) def test_save_pretrained_raise_not_implemented_exception(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) 
pipe.set_progress_bar_config(disable=None) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(tmpdir) except NotImplementedError: pass @slow @require_torch_gpu class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_canny(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "evil space-punk bird" control_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512)) image = load_image( "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((512, 512)) output = pipe( prompt, image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=50, strength=0.6, ) image = output.images[0] assert image.shape == (512, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy" ) assert np.abs(expected_image - image).max() < 9e-2 def test_load_local(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny") pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.unet.set_default_attn_processor() pipe.enable_model_cpu_offload() controlnet = ControlNetModel.from_single_file( "https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth" ) pipe_sf = StableDiffusionControlNetImg2ImgPipeline.from_single_file( "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors", safety_checker=None, controlnet=controlnet, scheduler_type="pndm", ) pipe_sf.unet.set_default_attn_processor() pipe_sf.enable_model_cpu_offload() control_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512)) image = load_image( "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((512, 512)) prompt = "bird" generator = torch.Generator(device="cpu").manual_seed(0) output = pipe( prompt, image=image, control_image=control_image, strength=0.9, generator=generator, output_type="np", num_inference_steps=3, ).images[0] generator = torch.Generator(device="cpu").manual_seed(0) output_sf = pipe_sf( prompt, image=image, control_image=control_image, strength=0.9, generator=generator, output_type="np", num_inference_steps=3, ).images[0] max_diff = numpy_cosine_similarity_distance(output_sf.flatten(), output.flatten()) assert max_diff < 1e-3
diffusers/tests/pipelines/controlnet/test_controlnet_img2img.py/0
{ "file_path": "diffusers/tests/pipelines/controlnet/test_controlnet_img2img.py", "repo_id": "diffusers", "token_count": 8286 }
150
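Outside the test harness, the ControlNet image-to-image flow exercised by the slow `test_canny` above boils down to the call pattern below. This is a minimal, hedged sketch rather than part of the test file: it reuses the checkpoints and image URLs referenced in the tests, while the step count and strength are illustrative values.

```py
import torch

from diffusers import ControlNetModel, StableDiffusionControlNetImg2ImgPipeline
from diffusers.utils import load_image

# Canny-conditioned ControlNet paired with the SD 1.5 base weights, as in the slow test.
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, safety_checker=None, torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()

init_image = load_image(
    "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
).resize((512, 512))
control_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
).resize((512, 512))

# strength < 1.0 keeps part of the init image; the canny map steers the structure.
image = pipe(
    "evil space-punk bird",
    image=init_image,
    control_image=control_image,
    strength=0.6,
    num_inference_steps=50,
    generator=torch.Generator(device="cpu").manual_seed(0),
).images[0]
image.save("bird_controlnet_img2img.png")
```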
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase): pipeline_class = IFInpaintingPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"} batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} def get_dummy_components(self): return self._get_dummy_components() def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "output_type": "np", } return inputs @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3) def test_save_load_optional_components(self): self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA") def test_save_load_float16(self): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_float16(expected_max_diff=1e-1) def test_attention_slicing_forward_pass(self): self._test_attention_slicing_forward_pass(expected_max_diff=1e-2) def test_save_load_local(self): self._test_save_load_local() def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical( expected_max_diff=1e-2, ) @slow @require_torch_gpu class IFInpaintingPipelineSlowTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_if_inpainting(self): pipe = IFInpaintingPipeline.from_pretrained( "DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16 ) pipe.unet.set_attn_processor(AttnAddedKVProcessor()) pipe.enable_model_cpu_offload() # Super resolution test torch.cuda.empty_cache() 
torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device) mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device) generator = torch.Generator(device="cpu").manual_seed(0) output = pipe( prompt="anime prompts", image=image, mask_image=mask_image, num_inference_steps=2, generator=generator, output_type="np", ) image = output.images[0] mem_bytes = torch.cuda.max_memory_allocated() assert mem_bytes < 12 * 10**9 expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" ) assert_mean_pixel_difference(image, expected_image) pipe.remove_all_hooks()
diffusers/tests/pipelines/deepfloyd_if/test_if_inpainting.py/0
{ "file_path": "diffusers/tests/pipelines/deepfloyd_if/test_if_inpainting.py", "repo_id": "diffusers", "token_count": 2035 }
151
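For reference, the slow `test_if_inpainting` above maps onto the usage sketch below. It is hedged, not authoritative: the DeepFloyd checkpoint is gated on the Hub (its license must be accepted first), the prompt is illustrative, and the random image/mask tensors simply stand in for real inputs, mirroring what the fast test feeds the pipeline.

```py
import torch

from diffusers import IFInpaintingPipeline

# Stage-I DeepFloyd IF inpainting at 64x64 resolution, as in the slow test above.
pipe = IFInpaintingPipeline.from_pretrained(
    "DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()

# Random tensors stand in for a real image / mask pair (PIL images also work).
image = torch.rand(1, 3, 64, 64)
mask_image = (torch.rand(1, 3, 64, 64) > 0.5).float()

result = pipe(
    prompt="blue sunglasses",
    image=image,
    mask_image=mask_image,
    num_inference_steps=50,
    generator=torch.Generator(device="cpu").manual_seed(0),
    output_type="np",
).images[0]
```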
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from transformers import ( CLIPProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, StableDiffusionGLIGENTextImagePipeline, UNet2DConditionModel, ) from diffusers.pipelines.stable_diffusion import CLIPImageProjection from diffusers.utils import load_image from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import ( TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class GligenTextImagePipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionGLIGENTextImagePipeline params = TEXT_TO_IMAGE_PARAMS | {"gligen_phrases", "gligen_images", "gligen_boxes"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_type="gated-text-image", ) # unet.position_net = PositionNet(32,32) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") image_encoder_config = CLIPVisionConfig( hidden_size=32, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, ) image_encoder = CLIPVisionModelWithProjection(image_encoder_config) processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14") image_project = CLIPImageProjection(hidden_size=32) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": image_encoder, "image_project": image_project, "processor": processor, } 
return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) gligen_images = load_image( "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/gligen/livingroom_modern.png" ) inputs = { "prompt": "A modern livingroom", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "gligen_phrases": ["a birthday cake"], "gligen_images": [gligen_images], "gligen_boxes": [[0.2676, 0.6088, 0.4773, 0.7183]], "output_type": "np", } return inputs def test_stable_diffusion_gligen_text_image_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionGLIGENTextImagePipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5069, 0.5561, 0.4577, 0.4792, 0.5203, 0.4089, 0.5039, 0.4919, 0.4499]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_gligen_k_euler_ancestral(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionGLIGENTextImagePipeline(**components) sd_pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.425, 0.494, 0.429, 0.469, 0.525, 0.417, 0.533, 0.5, 0.47]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_attention_slicing_forward_pass(self): super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(batch_size=3, expected_max_diff=3e-3)
diffusers/tests/pipelines/stable_diffusion_gligen_text_image/test_stable_diffusion_gligen_text_image.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion_gligen_text_image/test_stable_diffusion_gligen_text_image.py", "repo_id": "diffusers", "token_count": 3232 }
152
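The GLIGEN text-image pipeline adds grounding inputs on top of the usual text-to-image call, which is what the fast tests above exercise: each `gligen_phrases` entry is paired with a reference image and a normalized bounding box. A hedged sketch follows; the checkpoint name is an assumption taken from the GLIGEN documentation, while the prompt, phrase, box, and reference image come from the dummy inputs above.

```py
import torch

from diffusers import StableDiffusionGLIGENTextImagePipeline
from diffusers.utils import load_image

# Checkpoint name is an assumption; any GLIGEN text-image checkpoint with this layout should work.
pipe = StableDiffusionGLIGENTextImagePipeline.from_pretrained(
    "anhnct/Gligen_Text_Image", torch_dtype=torch.float16
).to("cuda")

reference = load_image(
    "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/gligen/livingroom_modern.png"
)

image = pipe(
    prompt="A modern livingroom",
    gligen_phrases=["a birthday cake"],
    gligen_images=[reference],
    # one (x0, y0, x1, y1) box in normalized coordinates per phrase/image pair
    gligen_boxes=[[0.2676, 0.6088, 0.4773, 0.7183]],
    gligen_scheduled_sampling_beta=1.0,
    num_inference_steps=50,
    output_type="pil",
).images[0]
image.save("gligen_livingroom.png")
```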
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import ( AutoencoderKL, AutoencoderTiny, DDIMScheduler, EulerDiscreteScheduler, LCMScheduler, StableDiffusionXLImg2ImgPipeline, UNet2DConditionModel, ) from diffusers.utils import load_image from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, numpy_cosine_similarity_distance, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, ) from ..test_pipelines_common import ( IPAdapterTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, SDXLOptionalComponentsTesterMixin, ) enable_full_determinism() class StableDiffusionXLImg2ImgPipelineFastTests( IPAdapterTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionXLImg2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union( {"add_text_embeds", "add_time_ids", "add_neg_time_ids"} ) def get_dummy_components(self, skip_first_text_encoder=False, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, time_cond_proj_dim=time_cond_proj_dim, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=72, # 5 * 8 + 32 cross_attention_dim=64 if not skip_first_text_encoder else 32, ) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) image_encoder_config = CLIPVisionConfig( hidden_size=32, image_size=224, projection_dim=32, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14, ) image_encoder = CLIPVisionModelWithProjection(image_encoder_config) feature_extractor = 
CLIPImageProcessor( crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder if not skip_first_text_encoder else None, "tokenizer": tokenizer if not skip_first_text_encoder else None, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "requires_aesthetics_score": True, "image_encoder": image_encoder, "feature_extractor": feature_extractor, } return components def get_dummy_tiny_autoencoder(self): return AutoencoderTiny(in_channels=3, out_channels=3, latent_channels=4) def test_components_function(self): init_components = self.get_dummy_components() init_components.pop("requires_aesthetics_score") pipe = self.pipeline_class(**init_components) self.assertTrue(hasattr(pipe, "components")) self.assertTrue(set(pipe.components.keys()) == set(init_components.keys())) def get_dummy_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image / 2 + 0.5 if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 5.0, "output_type": "np", "strength": 0.8, } return inputs def test_stable_diffusion_xl_img2img_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.4664, 0.4886, 0.4403, 0.6902, 0.5592, 0.4534, 0.5931, 0.5951, 0.5224]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_xl_img2img_euler_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.5604, 0.4352, 0.4717, 0.5844, 0.5101, 0.6704, 0.6290, 0.5460, 0.5286]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_xl_img2img_euler_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = 
self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.5604, 0.4352, 0.4717, 0.5844, 0.5101, 0.6704, 0.6290, 0.5460, 0.5286]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_attention_slicing_forward_pass(self): super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) # TODO(Patrick, Sayak) - skip for now as this requires more refiner tests def test_save_load_optional_components(self): pass def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self): components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) # forward without prompt embeds generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) negative_prompt = 3 * ["this is a negative prompt"] inputs["negative_prompt"] = negative_prompt inputs["prompt"] = 3 * [inputs["prompt"]] output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with prompt embeds generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) negative_prompt = 3 * ["this is a negative prompt"] prompt = 3 * [inputs.pop("prompt")] ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt) output = sd_pipe( **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, ) image_slice_2 = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 def test_stable_diffusion_xl_img2img_tiny_autoencoder(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe.vae = self.get_dummy_tiny_autoencoder() sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.0, 0.0, 0.0106, 0.0, 0.0, 0.0087, 0.0052, 0.0062, 0.0177]) assert np.allclose(image_slice, expected_slice, atol=1e-4, rtol=1e-4) @require_torch_gpu def test_stable_diffusion_xl_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: 
pipe.unet.set_default_attn_processor() generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_stable_diffusion_xl_multi_prompts(self): components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) # forward with single prompt generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) inputs["num_inference_steps"] = 5 output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same prompt duplicated generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) inputs["num_inference_steps"] = 5 inputs["prompt_2"] = inputs["prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different prompt generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) inputs["num_inference_steps"] = 5 inputs["prompt_2"] = "different prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 # manually set a negative_prompt generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) inputs["num_inference_steps"] = 5 inputs["negative_prompt"] = "negative prompt" output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same negative_prompt duplicated generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) inputs["num_inference_steps"] = 5 inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = inputs["negative_prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different negative_prompt generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) inputs["num_inference_steps"] = 5 inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = "different negative prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 def test_stable_diffusion_xl_img2img_negative_conditions(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice_with_no_neg_conditions = image[0, -3:, -3:, -1] image = sd_pipe( **inputs, negative_original_size=(512, 512), negative_crops_coords_top_left=( 0, 0, ), negative_target_size=(1024, 1024), ).images image_slice_with_neg_conditions = image[0, -3:, -3:, -1] assert ( np.abs(image_slice_with_no_neg_conditions.flatten() - image_slice_with_neg_conditions.flatten()).max() > 1e-4 ) def test_pipeline_interrupt(self): components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = 
self.get_dummy_inputs(torch_device) prompt = "hey" num_inference_steps = 5 # store intermediate latents from the generation process class PipelineState: def __init__(self): self.state = [] def apply(self, pipe, i, t, callback_kwargs): self.state.append(callback_kwargs["latents"]) return callback_kwargs pipe_state = PipelineState() sd_pipe( prompt, image=inputs["image"], strength=0.8, num_inference_steps=num_inference_steps, output_type="np", generator=torch.Generator("cpu").manual_seed(0), callback_on_step_end=pipe_state.apply, ).images # interrupt generation at step index interrupt_step_idx = 1 def callback_on_step_end(pipe, i, t, callback_kwargs): if i == interrupt_step_idx: pipe._interrupt = True return callback_kwargs output_interrupted = sd_pipe( prompt, image=inputs["image"], strength=0.8, num_inference_steps=num_inference_steps, output_type="latent", generator=torch.Generator("cpu").manual_seed(0), callback_on_step_end=callback_on_step_end, ).images # fetch intermediate latents at the interrupted step # from the completed generation process intermediate_latent = pipe_state.state[interrupt_step_idx] # compare the intermediate latent to the output of the interrupted process # they should be the same assert torch.allclose(intermediate_latent, output_interrupted, atol=1e-4) class StableDiffusionXLImg2ImgRefinerOnlyPipelineFastTests( PipelineLatentTesterMixin, PipelineTesterMixin, SDXLOptionalComponentsTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionXLImg2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=72, # 5 * 8 + 32 cross_attention_dim=32, ) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "tokenizer": None, "text_encoder": None, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "requires_aesthetics_score": True, "image_encoder": None, "feature_extractor": None, } return components def test_components_function(self): init_components = 
self.get_dummy_components() init_components.pop("requires_aesthetics_score") pipe = self.pipeline_class(**init_components) self.assertTrue(hasattr(pipe, "components")) self.assertTrue(set(pipe.components.keys()) == set(init_components.keys())) def get_dummy_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image / 2 + 0.5 if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 5.0, "output_type": "np", "strength": 0.8, } return inputs def test_stable_diffusion_xl_img2img_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.4745, 0.4924, 0.4338, 0.6468, 0.5547, 0.4419, 0.5646, 0.5897, 0.5146]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @require_torch_gpu def test_stable_diffusion_xl_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: pipe.unet.set_default_attn_processor() generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_stable_diffusion_xl_img2img_negative_conditions(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice_with_no_neg_conditions = image[0, -3:, -3:, -1] image = sd_pipe( **inputs, negative_original_size=(512, 512), negative_crops_coords_top_left=( 0, 0, ), negative_target_size=(1024, 1024), ).images image_slice_with_neg_conditions = image[0, -3:, -3:, -1] assert ( np.abs(image_slice_with_no_neg_conditions.flatten() - image_slice_with_neg_conditions.flatten()).max() > 1e-4 ) def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self): components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) # forward without prompt embeds generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) negative_prompt = 3 * ["this is a negative prompt"] inputs["negative_prompt"] = negative_prompt inputs["prompt"] = 3 * [inputs["prompt"]] output = sd_pipe(**inputs) image_slice_1 
= output.images[0, -3:, -3:, -1] # forward with prompt embeds generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) negative_prompt = 3 * ["this is a negative prompt"] prompt = 3 * [inputs.pop("prompt")] ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt) output = sd_pipe( **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, ) image_slice_2 = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 def test_stable_diffusion_xl_img2img_prompt_embeds_only(self): components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) # forward without prompt embeds generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) inputs["prompt"] = 3 * [inputs["prompt"]] output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with prompt embeds generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) prompt = 3 * [inputs.pop("prompt")] ( prompt_embeds, _, pooled_prompt_embeds, _, ) = sd_pipe.encode_prompt(prompt) output = sd_pipe( **inputs, prompt_embeds=prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, ) image_slice_2 = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 def test_attention_slicing_forward_pass(self): super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) def test_save_load_optional_components(self): self._test_save_load_optional_components() @slow class StableDiffusionXLImg2ImgIntegrationTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_download_ckpt_diff_format_is_same(self): ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/blob/main/sd_xl_refiner_1.0.safetensors" init_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_img2img/sketch-mountains-input.png" ) pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16 ) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.unet.set_default_attn_processor() pipe.enable_model_cpu_offload() generator = torch.Generator(device="cpu").manual_seed(0) image = pipe( prompt="mountains", image=init_image, num_inference_steps=5, generator=generator, output_type="np" ).images[0] pipe_single_file = StableDiffusionXLImg2ImgPipeline.from_single_file(ckpt_path, torch_dtype=torch.float16) pipe_single_file.scheduler = DDIMScheduler.from_config(pipe_single_file.scheduler.config) pipe_single_file.unet.set_default_attn_processor() pipe_single_file.enable_model_cpu_offload() generator = torch.Generator(device="cpu").manual_seed(0) image_single_file = pipe_single_file( prompt="mountains", image=init_image, num_inference_steps=5, generator=generator, output_type="np" ).images[0] max_diff = numpy_cosine_similarity_distance(image.flatten(), image_single_file.flatten()) assert max_diff < 5e-2 def 
test_single_file_component_configs(self): pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, variant="fp16", ) ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/blob/main/sd_xl_refiner_1.0.safetensors" single_file_pipe = StableDiffusionXLImg2ImgPipeline.from_single_file(ckpt_path, torch_dtype=torch.float16) assert pipe.text_encoder is None assert single_file_pipe.text_encoder is None for param_name, param_value in single_file_pipe.text_encoder_2.config.to_dict().items(): if param_name in ["torch_dtype", "architectures", "_name_or_path"]: continue assert pipe.text_encoder_2.config.to_dict()[param_name] == param_value PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "architectures", "_use_default_values"] for param_name, param_value in single_file_pipe.unet.config.items(): if param_name in PARAMS_TO_IGNORE: continue if param_name == "upcast_attention" and pipe.unet.config[param_name] is None: pipe.unet.config[param_name] = False assert ( pipe.unet.config[param_name] == param_value ), f"{param_name} is differs between single file loading and pretrained loading" for param_name, param_value in single_file_pipe.vae.config.items(): if param_name in PARAMS_TO_IGNORE: continue assert ( pipe.vae.config[param_name] == param_value ), f"{param_name} differs between single file loading and pretrained loading"
diffusers/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_img2img.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_img2img.py", "repo_id": "diffusers", "token_count": 15327 }
153
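The integration tests above load the SDXL refiner checkpoint both from the Hub and from a single `.safetensors` file; the plain refiner-only image-to-image call they compare against looks roughly like the sketch below. It is illustrative only: the checkpoint and input image are the ones used in `test_download_ckpt_diff_format_is_same`, while the strength and step values are arbitrary.

```py
import torch

from diffusers import StableDiffusionXLImg2ImgPipeline
from diffusers.utils import load_image

pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, variant="fp16"
)
pipe.enable_model_cpu_offload()

init_image = load_image(
    "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
    "/stable_diffusion_img2img/sketch-mountains-input.png"
)

# With img2img, roughly strength * num_inference_steps denoising steps are actually run.
image = pipe(
    prompt="mountains",
    image=init_image,
    strength=0.3,
    num_inference_steps=30,
    generator=torch.Generator(device="cpu").manual_seed(0),
).images[0]
image.save("refined_mountains.png")
```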
import torch from diffusers import DDIMScheduler from .test_schedulers import SchedulerCommonTest class DDIMSchedulerTest(SchedulerCommonTest): scheduler_classes = (DDIMScheduler,) forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50)) def get_scheduler_config(self, **kwargs): config = { "num_train_timesteps": 1000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "clip_sample": True, } config.update(**kwargs) return config def full_loop(self, **config): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) num_inference_steps, eta = 10, 0.0 model = self.dummy_model() sample = self.dummy_sample_deter scheduler.set_timesteps(num_inference_steps) for t in scheduler.timesteps: residual = model(sample, t) sample = scheduler.step(residual, t, sample, eta).prev_sample return sample def test_timesteps(self): for timesteps in [100, 500, 1000]: self.check_over_configs(num_train_timesteps=timesteps) def test_steps_offset(self): for steps_offset in [0, 1]: self.check_over_configs(steps_offset=steps_offset) scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(steps_offset=1) scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(5) assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1])) def test_betas(self): for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]): self.check_over_configs(beta_start=beta_start, beta_end=beta_end) def test_schedules(self): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=schedule) def test_prediction_type(self): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=prediction_type) def test_clip_sample(self): for clip_sample in [True, False]: self.check_over_configs(clip_sample=clip_sample) def test_timestep_spacing(self): for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=timestep_spacing) def test_rescale_betas_zero_snr(self): for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr) def test_thresholding(self): self.check_over_configs(thresholding=False) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, ) def test_time_indices(self): for t in [1, 10, 49]: self.check_over_forward(time_step=t) def test_inference_steps(self): for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]): self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps) def test_eta(self): for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]): self.check_over_forward(time_step=t, eta=eta) def test_variance(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5 def test_full_loop_no_noise(self): sample = 
self.full_loop() result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 172.0067) < 1e-2 assert abs(result_mean.item() - 0.223967) < 1e-3 def test_full_loop_with_v_prediction(self): sample = self.full_loop(prediction_type="v_prediction") result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 52.5302) < 1e-2 assert abs(result_mean.item() - 0.0684) < 1e-3 def test_full_loop_with_set_alpha_to_one(self): # We specify different beta, so that the first alpha is 0.99 sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01) result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 149.8295) < 1e-2 assert abs(result_mean.item() - 0.1951) < 1e-3 def test_full_loop_with_no_set_alpha_to_one(self): # We specify different beta, so that the first alpha is 0.99 sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01) result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 149.0784) < 1e-2 assert abs(result_mean.item() - 0.1941) < 1e-3 def test_full_loop_with_noise(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) num_inference_steps, eta = 10, 0.0 t_start = 8 model = self.dummy_model() sample = self.dummy_sample_deter scheduler.set_timesteps(num_inference_steps) # add noise noise = self.dummy_noise_deter timesteps = scheduler.timesteps[t_start * scheduler.order :] sample = scheduler.add_noise(sample, noise, timesteps[:1]) for t in timesteps: residual = model(sample, t) sample = scheduler.step(residual, t, sample, eta).prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 354.5418) < 1e-2, f"expected result sum 354.5418, but got {result_sum}" assert abs(result_mean.item() - 0.4616) < 1e-3, f"expected result mean 0.4616, but got {result_mean}"
diffusers/tests/schedulers/test_scheduler_ddim.py/0
{ "file_path": "diffusers/tests/schedulers/test_scheduler_ddim.py", "repo_id": "diffusers", "token_count": 3127 }
154
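The `full_loop` helper in the DDIM test above is the scheduler's intended usage pattern stripped of any pipeline: configure, call `set_timesteps`, then repeatedly call `step` with the model's noise prediction. Below is a self-contained sketch with a toy stand-in for the UNet; everything here is illustrative, not a real denoiser.

```py
import torch

from diffusers import DDIMScheduler

scheduler = DDIMScheduler(
    num_train_timesteps=1000,
    beta_start=0.0001,
    beta_end=0.02,
    beta_schedule="linear",
    clip_sample=True,
)
scheduler.set_timesteps(10)


def fake_model(sample, t):
    # stand-in for the UNet's noise prediction
    return 0.1 * sample


sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    noise_pred = fake_model(sample, t)
    # eta=0.0 is the deterministic DDIM update used throughout the tests above
    sample = scheduler.step(noise_pred, t, sample, eta=0.0).prev_sample
```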
import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class IPNDMSchedulerTest(SchedulerCommonTest): scheduler_classes = (IPNDMScheduler,) forward_default_kwargs = (("num_inference_steps", 50),) def get_scheduler_config(self, **kwargs): config = {"num_train_timesteps": 1000} config.update(**kwargs) return config def check_over_configs(self, time_step=0, **config): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) sample = self.dummy_sample residual = 0.1 * sample dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(num_inference_steps) # copy over dummy past residuals scheduler.ets = dummy_past_residuals[:] if time_step is None: time_step = scheduler.timesteps[len(scheduler.timesteps) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler = scheduler_class.from_pretrained(tmpdirname) new_scheduler.set_timesteps(num_inference_steps) # copy over dummy past residuals new_scheduler.ets = dummy_past_residuals[:] output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def test_from_save_pretrained(self): pass def check_over_forward(self, time_step=0, **forward_kwargs): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) sample = self.dummy_sample residual = 0.1 * sample dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(num_inference_steps) # copy over dummy past residuals (must be after setting timesteps) scheduler.ets = dummy_past_residuals[:] if time_step is None: time_step = scheduler.timesteps[len(scheduler.timesteps) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler = scheduler_class.from_pretrained(tmpdirname) # copy over dummy past residuals new_scheduler.set_timesteps(num_inference_steps) # copy over dummy past residual (must be after setting timesteps) new_scheduler.ets = dummy_past_residuals[:] output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def full_loop(self, **config): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) 
num_inference_steps = 10 model = self.dummy_model() sample = self.dummy_sample_deter scheduler.set_timesteps(num_inference_steps) for i, t in enumerate(scheduler.timesteps): residual = model(sample, t) sample = scheduler.step(residual, t, sample).prev_sample scheduler._step_index = None for i, t in enumerate(scheduler.timesteps): residual = model(sample, t) sample = scheduler.step(residual, t, sample).prev_sample return sample def test_step_shape(self): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) sample = self.dummy_sample residual = 0.1 * sample if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): scheduler.set_timesteps(num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] scheduler.ets = dummy_past_residuals[:] time_step_0 = scheduler.timesteps[5] time_step_1 = scheduler.timesteps[6] output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample self.assertEqual(output_0.shape, sample.shape) self.assertEqual(output_0.shape, output_1.shape) output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample self.assertEqual(output_0.shape, sample.shape) self.assertEqual(output_0.shape, output_1.shape) def test_timesteps(self): for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=timesteps, time_step=None) def test_inference_steps(self): for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]): self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None) def test_full_loop_no_noise(self): sample = self.full_loop() result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 2540529) < 10
diffusers/tests/schedulers/test_scheduler_ipndm.py/0
{ "file_path": "diffusers/tests/schedulers/test_scheduler_ipndm.py", "repo_id": "diffusers", "token_count": 3120 }
155
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py PATH_TO_DIFFUSERS = "src/diffusers" # Matches is_xxx_available() _re_backend = re.compile(r"is\_([a-z_]*)_available\(\)") # Matches from xxx import bla _re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n") DUMMY_CONSTANT = """ {0} = None """ DUMMY_CLASS = """ class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, {1}) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, {1}) """ DUMMY_FUNCTION = """ def {0}(*args, **kwargs): requires_backends({0}, {1}) """ def find_backend(line): """Find one (or multiple) backend in a code line of the init.""" backends = _re_backend.findall(line) if len(backends) == 0: return None return "_and_".join(backends) def read_init(): """Read the init and extracts PyTorch, TensorFlow, SentencePiece and Tokenizers objects.""" with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f: lines = f.readlines() # Get to the point we do the actual imports for type checking line_index = 0 while not lines[line_index].startswith("if TYPE_CHECKING"): line_index += 1 backend_specific_objects = {} # Go through the end of the file while line_index < len(lines): # If the line contains is_backend_available, we grab all objects associated with the `else` block backend = find_backend(lines[line_index]) if backend is not None: while not lines[line_index].startswith(" else:"): line_index += 1 line_index += 1 objects = [] # Until we unindent, add backend objects to the list while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8): line = lines[line_index] single_line_import_search = _re_single_line_import.search(line) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(", ")) elif line.startswith(" " * 12): objects.append(line[12:-2]) line_index += 1 if len(objects) > 0: backend_specific_objects[backend] = objects else: line_index += 1 return backend_specific_objects def create_dummy_object(name, backend_name): """Create the code for the dummy object corresponding to `name`.""" if name.isupper(): return DUMMY_CONSTANT.format(name) elif name.islower(): return DUMMY_FUNCTION.format(name, backend_name) else: return DUMMY_CLASS.format(name, backend_name) def create_dummy_files(backend_specific_objects=None): """Create the content of the dummy files.""" if backend_specific_objects is None: backend_specific_objects = read_init() # For special correspondence backend to module name as used in the function requires_modulename dummy_files = {} for backend, objects in backend_specific_objects.items(): backend_name = "[" + ", ".join(f'"{b}"' for b 
in backend.split("_and_")) + "]" dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n" dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects]) dummy_files[backend] = dummy_file return dummy_files def check_dummies(overwrite=False): """Check if the dummy files are up to date and maybe `overwrite` with the right content.""" dummy_files = create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py short_names = {"torch": "pt"} # Locate actual dummy modules and read their content. path = os.path.join(PATH_TO_DIFFUSERS, "utils") dummy_file_paths = { backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py") for backend in dummy_files.keys() } actual_dummies = {} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(file_path): with open(file_path, "r", encoding="utf-8", newline="\n") as f: actual_dummies[backend] = f.read() else: actual_dummies[backend] = "" for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main " "__init__ has new objects." ) with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f: f.write(dummy_files[backend]) else: raise ValueError( "The main __init__ has objects that are not present in " f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` " "to fix this." ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") args = parser.parse_args() check_dummies(args.fix_and_overwrite)
diffusers/utils/check_dummies.py/0
{ "file_path": "diffusers/utils/check_dummies.py", "repo_id": "diffusers", "token_count": 2591 }
156
<jupyter_start><jupyter_text>Fine-Tuning and GuidanceIn this notebook, we're going to cover two main approaches for adapting existing diffusion models:* With **fine-tuning**, we'll re-train existing models on new data to change the type of output they produce* With **guidance**, we'll take an existing model and steer the generation process at inference time for additional control What You Will Learn:By the end of this notebook, you will know how to:- Create a sampling loop and generate samples faster using a new scheduler- Fine-tune an existing diffusion model on new data, including: - Using gradient accumulation to get around some of the issues with small batches - Logging samples to [Weights and Biases](https://wandb.ai/site) during training to monitor progress (via the accompanying example script) - Saving the resulting pipeline and uploading it to the hub- Guide the sampling process with additional loss functions to add control over existing models, including: - Exploring different guidance approaches with a simple color-based loss - Using CLIP to guide generation using a text prompt - Sharing a custom sampling loop using Gradio and 🤗 Spaces❓If you have any questions, please post them on the `diffusion-models-class` channel on the Hugging Face Discord server. If you haven't signed up yet, you can do so here: https://huggingface.co/join/discord Setup and ImportsTo save your fine-tuned models to the Hugging Face Hub, you'll need to login with a **token that has write access**. The code below will prompt you for this and link to the relevant tokens page of your account. You'll also need a Weights and Biases account if you'd like to use the training script to log samples as the model trains - again, the code should prompt you to sign in where needed.Apart from that, the only set-up is installing a few dependencies, importing everything we'll need and specifying which device we'll use:<jupyter_code>%pip install -qq diffusers datasets accelerate wandb open-clip-torch # Code to log in to the Hugging Face Hub, needed for sharing models # Make sure you use a token with WRITE access from huggingface_hub import notebook_login notebook_login() import numpy as np import torch import torch.nn.functional as F import torchvision from datasets import load_dataset from diffusers import DDIMScheduler, DDPMPipeline from matplotlib import pyplot as plt from PIL import Image from torchvision import transforms from tqdm.auto import tqdm device = ( "mps" if torch.backends.mps.is_available() else "cuda" if torch.cuda.is_available() else "cpu" )<jupyter_output><empty_output><jupyter_text>Loading A Pre-Trained PipelineTo begin this notebook, let's load an existing pipeline and see what we can do with it:<jupyter_code>image_pipe = DDPMPipeline.from_pretrained("google/ddpm-celebahq-256") image_pipe.to(device);<jupyter_output><empty_output><jupyter_text>Generating images is as simple as running the [`__call__`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/ddpm/pipeline_ddpm.pyL42) method of the pipeline by calling it like a function:<jupyter_code>images = image_pipe().images images[0]<jupyter_output><empty_output><jupyter_text>Neat, but SLOW! So, before we get to the main topics of today, let's take a peek at the actual sampling loop and see how we can use a fancier sampler to speed this up: Faster Sampling with DDIMAt every step, the model is fed a noisy input and asked to predict the noise (and thus an estimate of what the fully denoised image might look like). 
Initially these predictions are not very good, which is why we break the process down into many steps. However, using 1000+ steps has been found to be unnecessary, and a flurry of recent research has explored how to achieve good samples with as few steps as possible. In the 🤗 Diffusers library, these **sampling methods are handled by a scheduler**, which must perform each update via the `step()` function. To generate an image, we begin with random noise $x$. Then, for every timestep in the scheduler's noise schedule, we feed the noisy input $x$ to the model and pass the resulting prediction to the `step()` function. This returns an output with a `prev_sample` attribute - previous because we're going "backwards" in time from high noise to low noise (the opposite of the forward diffusion process). Let's see this in action! First, we load a scheduler, here a DDIMScheduler based on the paper [Denoising Diffusion Implicit Models](https://arxiv.org/abs/2010.02502) which can give decent samples in much fewer steps than the original DDPM implementation:<jupyter_code># Create new scheduler and set num inference steps scheduler = DDIMScheduler.from_pretrained("google/ddpm-celebahq-256") scheduler.set_timesteps(num_inference_steps=40)<jupyter_output><empty_output><jupyter_text>You can see that this model does 40 steps total, each jumping the equivalent of 25 steps of the original 1000-step schedule:<jupyter_code>scheduler.timesteps<jupyter_output><empty_output><jupyter_text>Let's create 4 random images and run through the sampling loop, viewing both the current $x$ and the predicted denoised version as the process progresses:<jupyter_code># The random starting point x = torch.randn(4, 3, 256, 256).to(device) # Batch of 4, 3-channel 256 x 256 px images # Loop through the sampling timesteps for i, t in tqdm(enumerate(scheduler.timesteps)): # Prepare model input model_input = scheduler.scale_model_input(x, t) # Get the prediction with torch.no_grad(): noise_pred = image_pipe.unet(model_input, t)["sample"] # Calculate what the updated sample should look like with the scheduler scheduler_output = scheduler.step(noise_pred, t, x) # Update x x = scheduler_output.prev_sample # Occasionally display both x and the predicted denoised images if i % 10 == 0 or i == len(scheduler.timesteps) - 1: fig, axs = plt.subplots(1, 2, figsize=(12, 5)) grid = torchvision.utils.make_grid(x, nrow=4).permute(1, 2, 0) axs[0].imshow(grid.cpu().clip(-1, 1) * 0.5 + 0.5) axs[0].set_title(f"Current x (step {i})") pred_x0 = ( scheduler_output.pred_original_sample ) # Not available for all schedulers grid = torchvision.utils.make_grid(pred_x0, nrow=4).permute(1, 2, 0) axs[1].imshow(grid.cpu().clip(-1, 1) * 0.5 + 0.5) axs[1].set_title(f"Predicted denoised images (step {i})") plt.show()<jupyter_output><empty_output><jupyter_text>As you can see, the initial predictions are not great but as the process goes on the predicted outputs get more and more refined. If you're curious what maths is happening inside that `step()` function, inspect the (well-commented) code with:<jupyter_code># ??scheduler.step<jupyter_output><empty_output><jupyter_text>You can also drop in this new scheduler in place of the original one that came with the pipeline, and sample like so:<jupyter_code>image_pipe.scheduler = scheduler images = image_pipe(num_inference_steps=40).images images[0]<jupyter_output><empty_output><jupyter_text>Alright - we can get samples in a reasonable time now! 
This should speed things up as we move through the rest of this notebook :) Fine-TuningNow for the fun bit! Given this pre-trained pipeline, how might we re-train the model to generate images based on new training data?It turns out that this looks nearly identical to training a model from scratch (as we saw in [Unit 1](https://github.com/huggingface/diffusion-models-class/tree/main/unit1)) except that we begin with the existing model. Let's see this in action and talk about a few additional considerations as we go. First, the dataset: you could try [this vintage faces dataset](https://huggingface.co/datasets/Norod78/Vintage-Faces-FFHQAligned) or [these anime faces](https://huggingface.co/datasets/huggan/anime-faces) for something closer to the original training data of this faces model, but just for fun let's instead use the same small butterflies dataset we used to train from scratch in Unit 1. Run the code below to download the butterflies dataset and create a dataloader we can sample a batch of images from:<jupyter_code># @markdown load and prepare a dataset: # Not on Colab? Comments with #@ enable UI tweaks like headings or user inputs # but can safely be ignored if you're working on a different platform. dataset_name = "huggan/smithsonian_butterflies_subset" # @param dataset = load_dataset(dataset_name, split="train") image_size = 256 # @param batch_size = 4 # @param preprocess = transforms.Compose( [ transforms.Resize((image_size, image_size)), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def transform(examples): images = [preprocess(image.convert("RGB")) for image in examples["image"]] return {"images": images} dataset.set_transform(transform) train_dataloader = torch.utils.data.DataLoader( dataset, batch_size=batch_size, shuffle=True ) print("Previewing batch:") batch = next(iter(train_dataloader)) grid = torchvision.utils.make_grid(batch["images"], nrow=4) plt.imshow(grid.permute(1, 2, 0).cpu().clip(-1, 1) * 0.5 + 0.5);<jupyter_output>Using custom data configuration huggan--smithsonian_butterflies_subset-7665b1021a37404c Found cached dataset parquet (/home/lewis_huggingface_co/.cache/huggingface/datasets/huggan___parquet/huggan--smithsonian_butterflies_subset-7665b1021a37404c/0.0.0/2a3b91fbd88a2c90d1dbbb32b460cf621d31bd5b05b934492fdef7d8d6f236ec)<jupyter_text>**Consideration 1:** our batch size here (4) is pretty small, since we're training at large image size (256px) using a fairly large model and we'll run out of GPU RAM if we push the batch size too high. You can reduce the image size to speed things up and allow for larger batches, but these models were designed and originally trained for 256px generation. Now for the training loop. We'll update the weights of the pre-trained model by setting the optimization target to `image_pipe.unet.parameters()`. The rest is nearly identical to the example training loop from Unit 1. 
This takes about 10 minutes to run on Colab, so now is a good time to grab a coffee or tea while you wait:<jupyter_code>num_epochs = 2 # @param lr = 1e-5 # @param grad_accumulation_steps = 2 # @param optimizer = torch.optim.AdamW(image_pipe.unet.parameters(), lr=lr) losses = [] for epoch in range(num_epochs): for step, batch in tqdm(enumerate(train_dataloader), total=len(train_dataloader)): clean_images = batch["images"].to(device) # Sample noise to add to the images noise = torch.randn(clean_images.shape).to(clean_images.device) bs = clean_images.shape[0] # Sample a random timestep for each image timesteps = torch.randint( 0, image_pipe.scheduler.num_train_timesteps, (bs,), device=clean_images.device, ).long() # Add noise to the clean images according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_images = image_pipe.scheduler.add_noise(clean_images, noise, timesteps) # Get the model prediction for the noise noise_pred = image_pipe.unet(noisy_images, timesteps, return_dict=False)[0] # Compare the prediction with the actual noise: loss = F.mse_loss( noise_pred, noise ) # NB - trying to predict noise (eps) not (noisy_ims-clean_ims) or just (clean_ims) # Store for later plotting losses.append(loss.item()) # Update the model parameters with the optimizer based on this loss loss.backward() # Gradient accumulation: if (step + 1) % grad_accumulation_steps == 0: optimizer.step() optimizer.zero_grad() print( f"Epoch {epoch} average loss: {sum(losses[-len(train_dataloader):])/len(train_dataloader)}" ) # Plot the loss curve: plt.plot(losses)<jupyter_output><empty_output><jupyter_text>**Consideration 2:** Our loss signal is extremely noisy, since we're only working with four examples at random noise levels for each step. This is not ideal for training. One fix is to use an extremely low learning rate to limit the size of the update each step. It would be even better if we could find some way to get the same benefit we would get from using a larger batch size _without_ the memory requirements skyrocketing... Enter [gradient accumulation](https://kozodoi.me/python/deep%20learning/pytorch/tutorial/2021/02/19/gradient-accumulation.html#:~:text=Simply%20speaking%2C%20gradient%20accumulation%20means,might%20find%20this%20tutorial%20useful.). If we call `loss.backward()` multiple times before running `optimizer.step()` and `optimizer.zero_grad()`, then PyTorch accumulates (sums) the gradients, effectively merging the signal from several batches to give a single (better) estimate which is then used to update the parameters. This results in fewer total updates being made, just like we'd see if we used a larger batch size. This is something many frameworks will handle for you (for example, [🤗 Accelerate makes this easy](https://huggingface.co/docs/accelerate/usage_guides/gradient_accumulation)) but it is nice to see it implemented from scratch since this is a useful technique for dealing with training under GPU memory constraints! As you can see from the code above (after the `# Gradient accumulation` comment) there really isn't much code needed.<jupyter_code># Exercise: See if you can add gradient accumulation to the training loop in Unit 1. # How does it perform?
Think how you might adjust the learning rate based on the # number of gradient accumulation steps - should it stay the same as before?<jupyter_output><empty_output><jupyter_text>**Consideration 3:** This still takes a lot of time, and printing out a one-line update every epoch is not enough feedback to give us a good idea of what is going on. We should probably:* Generate some samples occasionally to visually examine the performance qualitatively as the model trains* Log things like the loss and sample generations during training, perhaps using something like Weights and Biases or tensorboard.I created a quick script ([`finetune_model.py`](https://github.com/huggingface/diffusion-models-class/blob/main/unit2/finetune_model.py)) that takes the training code above and adds minimal logging functionality. You can see the [logs from one training run here](https://wandb.ai/johnowhitaker/dm_finetune/runs/2upaa341) below:<jupyter_code>%wandb johnowhitaker/dm_finetune/2upaa341 # You'll need a W&B account for this to work - skip if you don't want to log in<jupyter_output><empty_output><jupyter_text>It's fun to see how the generated samples change as training progresses - even though the loss doesn't appear to be improving much, we can see a progression away from the original domain (images of bedrooms) towards the new training data (wikiart). At the end of this notebook is commented-out code for fine-tuning a model using this script as an alternative to running the cell above.<jupyter_code># Exercise: see if you can modify the official example training script we saw # in Unit 1 to begin with a pre-trained model rather than training from scratch. # Compare it to the minimal script linked above - what extra features is the minimal script missing?<jupyter_output><empty_output><jupyter_text>Generating some images with this model, we can see that these faces are already looking mighty strange!<jupyter_code># @markdown Generate and plot some images: x = torch.randn(8, 3, 256, 256).to(device) # Batch of 8 for i, t in tqdm(enumerate(scheduler.timesteps)): model_input = scheduler.scale_model_input(x, t) with torch.no_grad(): noise_pred = image_pipe.unet(model_input, t)["sample"] x = scheduler.step(noise_pred, t, x).prev_sample grid = torchvision.utils.make_grid(x, nrow=4) plt.imshow(grid.permute(1, 2, 0).cpu().clip(-1, 1) * 0.5 + 0.5);<jupyter_output><empty_output><jupyter_text>**Consideration 4:** Fine-tuning can be quite unpredictable! If we trained for a lot longer, we might see some perfect butterflies. But the intermediate steps can be extremely interesting in their own right, especially if your interests are more towards the artistic side! Explore training for very short or very long periods of time, and varying the learning rate to see how this affects the kinds of output the final model produces. Code for fine-tuning a model using the minimal example script we used on the WikiArt demo modelIf you'd like to train a similar model to the one I made on WikiArt, you can uncomment and run the cells below. 
Since this takes a while and may exhaust your GPU memory, I recommend doing this _after_ working through the rest of this notebook.<jupyter_code>## To download the fine-tuning script: # !wget https://github.com/huggingface/diffusion-models-class/raw/main/unit2/finetune_model.py ## To run the script, training the face model on some vintage faces ## (ideally run this in a terminal): # !python finetune_model.py --image_size 128 --batch_size 8 --num_epochs 16\ # --grad_accumulation_steps 2 --start_model "google/ddpm-celebahq-256"\ # --dataset_name "Norod78/Vintage-Faces-FFHQAligned" --wandb_project 'dm-finetune'\ # --log_samples_every 100 --save_model_every 1000 --model_save_name 'vintageface'<jupyter_output><empty_output><jupyter_text>Saving and Loading Fine-Tuned PipelinesNow that we've fine-tuned the U-Net in our diffusion model, let's save it to a local folder by running:<jupyter_code>image_pipe.save_pretrained("my-finetuned-model")<jupyter_output><empty_output><jupyter_text>As we saw in Unit 1, this will save the config, model, scheduler:<jupyter_code>!ls {"my-finetuned-model"}<jupyter_output>model_index.json scheduler unet<jupyter_text>Next, you can follow the same steps outlined in Unit 1's [Introduction to Diffusers](https://github.com/huggingface/diffusion-models-class/blob/main/unit1/01_introduction_to_diffusers.ipynb) to push the model to the Hub for later use:<jupyter_code># @title Upload a locally saved pipeline to the hub # Code to upload a pipeline saved locally to the hub from huggingface_hub import HfApi, ModelCard, create_repo, get_full_repo_name # Set up repo and upload files model_name = "ddpm-celebahq-finetuned-butterflies-2epochs" # @param What you want it called on the hub local_folder_name = "my-finetuned-model" # @param Created by the script or one you created via image_pipe.save_pretrained('save_name') description = "Describe your model here" # @param hub_model_id = get_full_repo_name(model_name) create_repo(hub_model_id) api = HfApi() api.upload_folder( folder_path=f"{local_folder_name}/scheduler", path_in_repo="", repo_id=hub_model_id ) api.upload_folder( folder_path=f"{local_folder_name}/unet", path_in_repo="", repo_id=hub_model_id ) api.upload_file( path_or_fileobj=f"{local_folder_name}/model_index.json", path_in_repo="model_index.json", repo_id=hub_model_id, ) # Add a model card (optional but nice!) content = f""" --- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Example Fine-Tuned Model for Unit 2 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) {description} ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('{hub_model_id}') image = pipeline().images[0] image ``` """ card = ModelCard(content) card.push_to_hub(hub_model_id)<jupyter_output><empty_output><jupyter_text>Congratulations, you've now fine-tuned your first diffusion model!For the rest of this notebook we'll use a [model](https://huggingface.co/johnowhitaker/sd-class-wikiart-from-bedrooms) I fine-tuned from [this model trained on LSUN bedrooms](https://huggingface.co/google/ddpm-bedroom-256) approximately one epoch on the [WikiArt dataset](https://huggingface.co/datasets/huggan/wikiart). 
If you'd prefer, you can skip this cell and use the faces/butterflies pipeline we fine-tuned in the previous section or load one from the Hub instead:<jupyter_code># Load the pretrained pipeline pipeline_name = "johnowhitaker/sd-class-wikiart-from-bedrooms" image_pipe = DDPMPipeline.from_pretrained(pipeline_name).to(device) # Sample some images with a DDIM Scheduler over 40 steps scheduler = DDIMScheduler.from_pretrained(pipeline_name) scheduler.set_timesteps(num_inference_steps=40) # Random starting point (batch of 8 images) x = torch.randn(8, 3, 256, 256).to(device) # Minimal sampling loop for i, t in tqdm(enumerate(scheduler.timesteps)): model_input = scheduler.scale_model_input(x, t) with torch.no_grad(): noise_pred = image_pipe.unet(model_input, t)["sample"] x = scheduler.step(noise_pred, t, x).prev_sample # View the results grid = torchvision.utils.make_grid(x, nrow=4) plt.imshow(grid.permute(1, 2, 0).cpu().clip(-1, 1) * 0.5 + 0.5);<jupyter_output><empty_output><jupyter_text>**Consideration 5:** It is often hard to tell how well fine-tuning is working, and what 'good performance' means may vary by use-case. For example, if you're fine-tuning a text-conditioned model like stable diffusion on a small dataset you probably want it to **retain** most of its original training so that it can understand arbitrary prompts not covered by your new dataset, while **adapting** to better match the style of your new training data. This could mean using a low learning rate alongside something like exponential model averaging, as demonstrated [in this great blog post about creating a pokemon version of stable diffusion](https://lambdalabs.com/blog/how-to-fine-tune-stable-diffusion-how-we-made-the-text-to-pokemon-model-at-lambda). In a different situation, you may want to completely re-train a model on new data (such as our bedroom -> wikiart example) in which case a larger learning rate and more training makes sense. Even though the [loss plot](https://wandb.ai/johnowhitaker/dm_finetune/runs/2upaa341) is not showing much improvement, the samples clearly show a move away from the original data and towards more 'artsy' outputs, although they remain mostly incoherent.Which leads us to the next section, as we examine how we might add additional guidance to such a model for better control over the outputs... GuidanceWhat do we do if we want some control over the samples generated? For example, say we wanted to bias the generated images to be a specific color. How would we go about that? Enter **guidance**, a technique for adding additional control to the sampling process. Step one is to create our conditioning function: some measure (loss) which we'd like to minimize. Here's one for the color example, which compares the pixels of an image to a target color (by default a sort of light teal) and returns the average error:<jupyter_code>def color_loss(images, target_color=(0.1, 0.9, 0.5)): """Given a target color (R, G, B) return a loss for how far away on average the images' pixels are from that color. 
Defaults to a light teal: (0.1, 0.9, 0.5)""" target = ( torch.tensor(target_color).to(images.device) * 2 - 1 ) # Map target color to (-1, 1) target = target[ None, :, None, None ] # Get shape right to work with the images (b, c, h, w) error = torch.abs( images - target ).mean() # Mean absolute difference between the image pixels and the target color return error<jupyter_output><empty_output><jupyter_text>Next, we'll make a modified version of the sampling loop where, at each step, we do the following:- Create a new version of x that has requires_grad = True- Calculate the denoised version (x0)- Feed the predicted x0 through our loss function- Find the **gradient** of this loss function with respect to x- Use this conditioning gradient to modify x before we step with the scheduler, hopefully pushing x in a direction that will lead to lower loss according to our guidance functionThere are two variants here that you can explore. In the first, we set requires_grad on x **after** we get our noise prediction from the UNet, which is more memory efficient (since we don't have to trace gradients back through the diffusion model) but gives a less accurate gradient. In the second we set requires_grad on x **first**, then feed it through the UNet and calculate the predicted x0.<jupyter_code># Variant 1: shortcut method # The guidance scale determines the strength of the effect guidance_loss_scale = 40 # Explore changing this to 5, or 100 x = torch.randn(8, 3, 256, 256).to(device) for i, t in tqdm(enumerate(scheduler.timesteps)): # Prepare the model input model_input = scheduler.scale_model_input(x, t) # predict the noise residual with torch.no_grad(): noise_pred = image_pipe.unet(model_input, t)["sample"] # Set x.requires_grad to True x = x.detach().requires_grad_() # Get the predicted x0 x0 = scheduler.step(noise_pred, t, x).pred_original_sample # Calculate loss loss = color_loss(x0) * guidance_loss_scale if i % 10 == 0: print(i, "loss:", loss.item()) # Get gradient cond_grad = -torch.autograd.grad(loss, x)[0] # Modify x based on this gradient x = x.detach() + cond_grad # Now step with scheduler x = scheduler.step(noise_pred, t, x).prev_sample # View the output grid = torchvision.utils.make_grid(x, nrow=4) im = grid.permute(1, 2, 0).cpu().clip(-1, 1) * 0.5 + 0.5 Image.fromarray(np.array(im * 255).astype(np.uint8))<jupyter_output><empty_output><jupyter_text>This second option requires nearly double the GPU RAM to run, even though we only generate a batch of four images instead of eight. 
See if you can spot the difference, and think through why this way is more 'accurate':<jupyter_code># Variant 2: setting x.requires_grad before calculating the model predictions guidance_loss_scale = 40 x = torch.randn(4, 3, 256, 256).to(device) for i, t in tqdm(enumerate(scheduler.timesteps)): # Set requires_grad before the model forward pass x = x.detach().requires_grad_() model_input = scheduler.scale_model_input(x, t) # predict (with grad this time) noise_pred = image_pipe.unet(model_input, t)["sample"] # Get the predicted x0: x0 = scheduler.step(noise_pred, t, x).pred_original_sample # Calculate loss loss = color_loss(x0) * guidance_loss_scale if i % 10 == 0: print(i, "loss:", loss.item()) # Get gradient cond_grad = -torch.autograd.grad(loss, x)[0] # Modify x based on this gradient x = x.detach() + cond_grad # Now step with scheduler x = scheduler.step(noise_pred, t, x).prev_sample grid = torchvision.utils.make_grid(x, nrow=4) im = grid.permute(1, 2, 0).cpu().clip(-1, 1) * 0.5 + 0.5 Image.fromarray(np.array(im * 255).astype(np.uint8))<jupyter_output><empty_output><jupyter_text>In the second variant, the memory requirements are higher and the effect is less pronounced, so you may think that this is inferior. However, the outputs are arguably closer to the types of images the model was trained on, and you can always increase the guidance scale for a stronger effect. Which approach you use will ultimately come down to what works best experimentally.<jupyter_code># Exercise: pick your favourite colour and look up its values in RGB space. # Edit the `color_loss()` line in the cell above to receive these new RGB values and examine the outputs - do they match what you expect?<jupyter_output><empty_output><jupyter_text>CLIP GuidanceGuiding towards a color gives us a little bit of control, but what if we could just type some text describing what we want?[CLIP](https://openai.com/blog/clip/) is a model created by OpenAI that allows us to compare images to text captions. This is extremely powerful, since it allows us to quantify how well an image matches a prompt. And since the process is differentiable, we can use this as a loss function to guide our diffusion model! We won't go too much into the details here. The basic approach is as follows:- Embed the text prompt to get a 512-dimensional CLIP embedding of the text- For every step in the diffusion model process: - Make several variants of the predicted denoised image (having multiple variations gives a cleaner loss signal) - For each one, embed the image with CLIP and compare this embedding with the text embedding of the prompt (using a measure called 'Great Circle Distance Squared')- Calculate the gradient of this loss with respect to the current noisy x and use this gradient to modify x before updating it with the scheduler. For a deeper explanation of CLIP, check out [this lesson on the topic](https://johnowhitaker.github.io/tglcourse/clip.html) or [this report on the OpenCLIP project](https://wandb.ai/johnowhitaker/openclip-benchmarking/reports/Exploring-OpenCLIP--VmlldzoyOTIzNzIz) which we're using to load the CLIP model. 
Run the next cell to load a CLIP model:<jupyter_code># @markdown load a CLIP model and define the loss function import open_clip clip_model, _, preprocess = open_clip.create_model_and_transforms( "ViT-B-32", pretrained="openai" ) clip_model.to(device) # Transforms to resize and augment an image + normalize to match CLIP's training data tfms = torchvision.transforms.Compose( [ torchvision.transforms.RandomResizedCrop(224), # Random CROP each time torchvision.transforms.RandomAffine( 5 ), # One possible random augmentation: skews the image torchvision.transforms.RandomHorizontalFlip(), # You can add additional augmentations if you like torchvision.transforms.Normalize( mean=(0.48145466, 0.4578275, 0.40821073), std=(0.26862954, 0.26130258, 0.27577711), ), ] ) # And define a loss function that takes an image, embeds it and compares with # the text features of the prompt def clip_loss(image, text_features): image_features = clip_model.encode_image( tfms(image) ) # Note: applies the above transforms input_normed = torch.nn.functional.normalize(image_features.unsqueeze(1), dim=2) embed_normed = torch.nn.functional.normalize(text_features.unsqueeze(0), dim=2) dists = ( input_normed.sub(embed_normed).norm(dim=2).div(2).arcsin().pow(2).mul(2) ) # Squared Great Circle Distance return dists.mean()<jupyter_output>100%|████████████████████████████████████████| 354M/354M [00:02<00:00, 120MiB/s]<jupyter_text>With a loss function defined, our guided sampling loop looks similar to the previous examples, replacing `color_loss()` with our new clip-based loss function:<jupyter_code># @markdown applying guidance using CLIP prompt = "Red Rose (still life), red flower painting" # @param # Explore changing this guidance_scale = 8 # @param n_cuts = 4 # @param # More steps -> more time for the guidance to have an effect scheduler.set_timesteps(50) # We embed a prompt with CLIP as our target text = open_clip.tokenize([prompt]).to(device) with torch.no_grad(), torch.cuda.amp.autocast(): text_features = clip_model.encode_text(text) x = torch.randn(4, 3, 256, 256).to( device ) # RAM usage is high, you may want only 1 image at a time for i, t in tqdm(enumerate(scheduler.timesteps)): model_input = scheduler.scale_model_input(x, t) # predict the noise residual with torch.no_grad(): noise_pred = image_pipe.unet(model_input, t)["sample"] cond_grad = 0 for cut in range(n_cuts): # Set requires grad on x x = x.detach().requires_grad_() # Get the predicted x0: x0 = scheduler.step(noise_pred, t, x).pred_original_sample # Calculate loss loss = clip_loss(x0, text_features) * guidance_scale # Get gradient (scale by n_cuts since we want the average) cond_grad -= torch.autograd.grad(loss, x)[0] / n_cuts if i % 25 == 0: print("Step:", i, ", Guidance loss:", loss.item()) # Modify x based on this gradient alpha_bar = scheduler.alphas_cumprod[i] x = ( x.detach() + cond_grad * alpha_bar.sqrt() ) # Note the additional scaling factor here! # Now step with scheduler x = scheduler.step(noise_pred, t, x).prev_sample grid = torchvision.utils.make_grid(x.detach(), nrow=4) im = grid.permute(1, 2, 0).cpu().clip(-1, 1) * 0.5 + 0.5 Image.fromarray(np.array(im * 255).astype(np.uint8))<jupyter_output><empty_output><jupyter_text>Those look sort of like roses! It's not perfect, but if you play around with the settings you can get some pleasing images with this. If you examine the code above you'll see I'm scaling the conditioning gradient by a factor of `alpha_bar.sqrt()`. 
There is some theory showing the 'right' way to scale these gradients, but in practice this is also something you can experiment with. For some types of guidance, you may want most of the effect concentrated in the early steps, for others (say, a style loss focused on textures) you may prefer that they only kick in towards the end of the generation process. Some possible schedules are shown below:<jupyter_code># @markdown Plotting some possible schedules: plt.plot([1 for a in scheduler.alphas_cumprod], label="no scaling") plt.plot([a for a in scheduler.alphas_cumprod], label="alpha_bar") plt.plot([a.sqrt() for a in scheduler.alphas_cumprod], label="alpha_bar.sqrt()") plt.plot( [(1 - a).sqrt() for a in scheduler.alphas_cumprod], label="(1-alpha_bar).sqrt()" ) plt.legend() plt.title("Possible guidance scaling schedules");<jupyter_output><empty_output><jupyter_text>Experiment with different schedules, guidance scales and any other tricks you can think of (clipping the gradients within some range is a popular modification) to see how good you can get this! Also make sure you try swapping in other models. Perhaps the faces model we loaded at the start - can you reliably guide it to produce a male face? What if you combine CLIP guidance with the color loss we used earlier? Etc.If you check out [some code for CLIP-guided diffusion in practice](https://huggingface.co/spaces/EleutherAI/clip-guided-diffusion/blob/main/app.py), you'll see a more complex approach with a better class for picking random cutouts from the images and lots of additional tweaks to the loss function for better performance. Before text-conditioned diffusion models came along, this was the best text-to-image system there was! Our little toy version here has lots of room to improve, but it captures the core idea: thanks to guidance plus the amazing capabilities of CLIP, we can add text control to an unconditional diffusion model 🎨. Sharing A Custom Sampling Loop as a Gradio DemoPerhaps you've figured out a fun loss to guide generation with, and you now want to share both your fine-tuned model and this custom sampling strategy with the world... Enter [Gradio](https://gradio.app/). Gradio is a free and open-source tool that allows users to easily create and share interactive machine learning models through a simple web interface. With Gradio, users can build custom interfaces for their machine learning models, which can then be shared with others through a unique URL. It is also integrated into 🤗 Spaces which makes it easy to host demos and share them with others. We'll put our core logic in a function that takes some inputs and produces an image as the output. This can then be wrapped in a simple interface that allows the user to specify some parameters (which are passed as inputs to the main generate function). 
There are many [components](https://gradio.app/docs/components) available - for this example we'll use a slider for the guidance scale and a color picker to define the target color.<jupyter_code>%pip install -q gradio # Install the library import gradio as gr from PIL import Image, ImageColor # The function that does the hard work def generate(color, guidance_loss_scale): target_color = ImageColor.getcolor(color, "RGB") # Target color as RGB target_color = [a / 255 for a in target_color] # Rescale from (0, 255) to (0, 1) x = torch.randn(1, 3, 256, 256).to(device) for i, t in tqdm(enumerate(scheduler.timesteps)): model_input = scheduler.scale_model_input(x, t) with torch.no_grad(): noise_pred = image_pipe.unet(model_input, t)["sample"] x = x.detach().requires_grad_() x0 = scheduler.step(noise_pred, t, x).pred_original_sample loss = color_loss(x0, target_color) * guidance_loss_scale cond_grad = -torch.autograd.grad(loss, x)[0] x = x.detach() + cond_grad x = scheduler.step(noise_pred, t, x).prev_sample grid = torchvision.utils.make_grid(x, nrow=4) im = grid.permute(1, 2, 0).cpu().clip(-1, 1) * 0.5 + 0.5 im = Image.fromarray(np.array(im * 255).astype(np.uint8)) im.save("test.jpeg") return im # See the gradio docs for the types of inputs and outputs available inputs = [ gr.ColorPicker(label="color", value="55FFAA"), # Add any inputs you need here gr.Slider(label="guidance_scale", minimum=0, maximum=30, value=3), ] outputs = gr.Image(label="result") # And the minimal interface demo = gr.Interface( fn=generate, inputs=inputs, outputs=outputs, examples=[ ["#BB2266", 3], ["#44CCAA", 5], # You can provide some example inputs to get people started ], ) demo.launch(debug=True) # debug=True allows you to see errors and output in Colab<jupyter_output><empty_output>
diffusion-models-class/unit2/01_finetuning_and_guidance.ipynb/0
{ "file_path": "diffusion-models-class/unit2/01_finetuning_and_guidance.ipynb", "repo_id": "diffusion-models-class", "token_count": 11877 }
157
- title: Course introduction sections: - local: unit0/1 title: Introduction - title: 1. Introduction to diffusion models sections: - local: unit1/1 title: Overview - local: unit1/2 title: Implementation with 🤗 Diffusers - local: unit1/3 title: Implementation from scratch - title: 2. Fine-Tuning, Guidance and Conditioning sections: - local: unit2/1 title: Overview - local: unit2/2 title: Fine-Tuning and guidance - local: unit2/3 title: Class-conditioned Diffusion Model - title: 3. Stable Diffusion sections: - local: unit3/1 title: Overview - local: unit3/2 title: Introduction to Stable Diffusion - local: unit3/3 title: Deep dive into Stable Diffusion - title: 4. Going Further with Diffusion Models sections: - local: unit4/1 title: Overview - local: unit4/2 title: Inverse Denoising Diffusion Implicit Models - local: unit4/3 title: Diffusion for audio - title: Events related to the course sections: - local: events/launch title: Diffusion Models Live Event - local: events/dreambooth title: Dreambooth Hackathon - local: events/3 title: Keras Dreambooth event - local: events/4 title: JAX/Diffusers community sprint
diffusion-models-class/units/en/_toctree.yml/0
{ "file_path": "diffusion-models-class/units/en/_toctree.yml", "repo_id": "diffusion-models-class", "token_count": 424 }
158
# Diffusion for Audio <CourseFloatingBanner unit={4} classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Diffusion for Audio", value: "https://colab.research.google.com/github/huggingface/diffusion-models-class/blob/main/units/en/unit4/diffusion_for_audio.ipynb"}, {label: "Diffusion for Audio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/diffusion-models-class/blob/main/units/en/unit4/diffusion_for_audio.ipynb"}, ]} /> In this notebook, we're going to take a brief look at generating audio with diffusion models. What you will learn: - How audio is represented in a computer - Methods to convert between raw audio data and spectrograms - How to prepare a dataloader with a custom collate function to convert audio slices into spectrograms - Fine-tuning an existing audio diffusion model on a specific genre of music - Uploading your custom pipeline to the Hugging Face hub Caveat: This is mostly for educational purposes - no guarantees our model will sound good 😉 Let's get started! ## Setup and Imports ``` # !pip install -q datasets diffusers torchaudio accelerate ``` ``` import torch, random import numpy as np import torch.nn.functional as F from tqdm.auto import tqdm from IPython.display import Audio from matplotlib import pyplot as plt from diffusers import DiffusionPipeline from torchaudio import transforms as AT from torchvision import transforms as IT ``` ## Sampling from a Pre-Trained Audio Pipeline Let's begin by following the [Audio Diffusion docs](https://huggingface.co/docs/diffusers/api/pipelines/audio_diffusion) to load a pre-existing audio diffusion model pipeline: ``` # Load a pre-trained audio diffusion pipeline device = "cuda" if torch.cuda.is_available() else "cpu" pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-instrumental-hiphop-256").to(device) ``` As with the pipelines we've used in previous units, we can create samples by calling the pipeline like so: ``` # Sample from the pipeline and display the outputs: output = pipe() display(output.images[0]) display(Audio(output.audios[0], rate=pipe.mel.get_sample_rate())) ``` Here, the rate argument specifies the sampling rate for the audio; we'll take a deeper look at this later. You'll also notice there are multiple things returned by the pipeline. What's going on here? Let's take a closer look at both outputs. The first is an array of data, representing the generated audio: ``` # The audio array: output.audios[0].shape ``` ``` (1, 130560) ``` The second looks like a greyscale image: ``` # The output image (spectrogram): output.images[0].size ``` ``` (256, 256) ``` This gives us a hint at how this pipeline works. The audio is not directly generated with diffusion - instead, the pipeline has the same kind of 2D UNet as the unconditional image generation pipelines we saw in Unit 1 that is used to generate the spectrogram, which is then post-processed into the final audio. The pipe has an extra component that handles these conversions, which we can access via pipe.mel: ``` pipe.mel ``` ``` Mel { "_class_name": "Mel", "_diffusers_version": "0.12.0.dev0", "hop_length": 512, "n_fft": 2048, "n_iter": 32, "sample_rate": 22050, "top_db": 80, "x_res": 256, "y_res": 256 } ``` ## From Audio to Image and Back Again An audio 'waveform' encodes the raw audio samples over time - this could be the electrical signal received from a microphone, for example. 
Working with this 'Time Domain' representation can be tricky, so it is a common practice to convert it into some other form, commonly something called a spectrogram. A spectrogram shows the intensity of different frequencies (y axis) vs time (x axis): ``` # Calculate and show a spectrogram for our generated audio sample using torchaudio spec_transform = AT.Spectrogram(power=2) spectrogram = spec_transform(torch.tensor(output.audios[0])) print(spectrogram.min(), spectrogram.max()) log_spectrogram = spectrogram.log() plt.imshow(log_spectrogram[0], cmap='gray'); ``` ``` tensor(0.) tensor(6.0842) ``` The spectrogram we just made has values between 0.0000000000001 and 1, with most being close to the low end of that range. This is not ideal for visualization or modelling - in fact we had to take the log of these values to get a greyscale plot that showed any detail. For this reason, we typically use a special kind of spectrogram called a Mel spectrogram, which is designed to capture the kinds of information which are important for human hearing by applying some transforms to the different frequency components of the signal. (Diagram: some audio transforms, from the [torchaudio docs](https://pytorch.org/audio/stable/transforms.html).) Luckily for us, we don't even need to worry too much about these transforms - the pipeline's mel functionality handles these details for us. Using this, we can convert a spectrogram image to audio like so: ``` a = pipe.mel.image_to_audio(output.images[0]) a.shape ``` ``` (130560,) ``` And we can convert an array of audio data into a spectrogram image by first loading the raw audio data and then calling the audio_slice_to_image() function. Longer clips are automatically sliced into chunks of the correct length to produce a 256x256 spectrogram image: ``` pipe.mel.load_audio(raw_audio=a) im = pipe.mel.audio_slice_to_image(0) im ``` The audio is represented as a long array of numbers. To play this out loud we need one more key piece of information: the sample rate. How many samples (individual values) do we use to represent a single second of audio? We can see the sample rate used during training of this pipeline with: ``` sample_rate_pipeline = pipe.mel.get_sample_rate() sample_rate_pipeline ``` ``` 22050 ``` If we specify the sample rate incorrectly, we get audio that is sped up or slowed down: ``` display(Audio(output.audios[0], rate=44100)) # 2x speed ``` ## Fine-Tuning the pipeline Now that we have a rough understanding of how the pipeline works, let's fine-tune it on some new audio data! 
The dataset is a collection of audio clips in different genres, which we can load from the hub like so: ``` from datasets import load_dataset dataset = load_dataset('lewtun/music_genres', split='train') dataset ``` ``` Dataset({ features: ['audio', 'song_id', 'genre_id', 'genre'], num_rows: 19909 }) ``` You can use the code below to see the different genres in the dataset and how many samples are contained in each: ``` for g in list(set(dataset['genre'])): print(g, sum(x==g for x in dataset['genre'])) ``` ``` Pop 945 Blues 58 Punk 2582 Old-Time / Historic 408 Experimental 1800 Folk 1214 Electronic 3071 Spoken 94 Classical 495 Country 142 Instrumental 1044 Chiptune / Glitch 1181 International 814 Ambient Electronic 796 Jazz 306 Soul-RnB 94 Hip-Hop 1757 Easy Listening 13 Rock 3095 ``` The dataset has the audio as arrays: ``` audio_array = dataset[0]['audio']['array'] sample_rate_dataset = dataset[0]['audio']['sampling_rate'] print('Audio array shape:', audio_array.shape) print('Sample rate:', sample_rate_dataset) display(Audio(audio_array, rate=sample_rate_dataset)) ``` ``` Audio array shape: (1323119,) Sample rate: 44100 ``` Note that the sample rate of this audio is higher - if we want to use the existing pipeline we'll need to 'resample' it to match. The clips are also longer than the ones the pipeline is set up for. Fortunately, when we load the audio using pipe.mel it automatically slices the clip into smaller sections: ``` a = dataset[0]['audio']['array'] # Get the audio array pipe.mel.load_audio(raw_audio=a) # Load it with pipe.mel pipe.mel.audio_slice_to_image(0) # View the first 'slice' as a spectrogram ``` We need to remember to adjust the sampling rate, since the data from this dataset has twice as many samples per second: ``` sample_rate_dataset = dataset[0]['audio']['sampling_rate'] sample_rate_dataset ``` ``` 44100 ``` Here we use torchaudio's transforms (imported as AT) to do the resampling, the pipe's mel to turn audio into an image and torchvision's transforms (imported as IT) to turn images into tensors. This gives us a function that turns an audio clip into a spectrogram tensor that we can use for training: ``` resampler = AT.Resample(sample_rate_dataset, sample_rate_pipeline, dtype=torch.float32) to_t = IT.ToTensor() def to_image(audio_array): audio_tensor = torch.tensor(audio_array).to(torch.float32) audio_tensor = resampler(audio_tensor) pipe.mel.load_audio(raw_audio=np.array(audio_tensor)) num_slices = pipe.mel.get_number_of_slices() slice_idx = random.randint(0, num_slices-1) # Pick a random slice each time (excluding the last short slice) im = pipe.mel.audio_slice_to_image(slice_idx) return im ``` We'll use our to_image() function as part of a custom collate function to turn our dataset into a dataloader we can use for training. The collate function defines how to transform a batch of examples from the dataset into the final batch of data ready for training. 
In this case we turn each audio sample into a spectrogram image and stack the resulting tensors together: ``` def collate_fn(examples): # to image -> to tensor -> rescale to (-1, 1) -> stack into batch audio_ims = [to_t(to_image(x['audio']['array']))*2-1 for x in examples] return torch.stack(audio_ims) # Create a dataset with only the chosen genre of songs batch_size=4 # 4 on colab, 12 on A100 chosen_genre = 'Electronic' # <<< Try training on different genres <<< indexes = [i for i, g in enumerate(dataset['genre']) if g == chosen_genre] filtered_dataset = dataset.select(indexes) dl = torch.utils.data.DataLoader(filtered_dataset.shuffle(), batch_size=batch_size, collate_fn=collate_fn, shuffle=True) batch = next(iter(dl)) print(batch.shape) ``` ``` torch.Size([4, 1, 256, 256]) ``` **NB: You will need to use a lower batch size (e.g. 4) unless you have plenty of GPU vRAM available.** ## Training Loop Here is a simple training loop that runs through the dataloader for a few epochs to fine-tune the pipeline's UNet. You can also skip this cell and load the pipeline with the code in the following cell. ``` epochs = 3 lr = 1e-4 pipe.unet.train() pipe.scheduler.set_timesteps(1000) optimizer = torch.optim.AdamW(pipe.unet.parameters(), lr=lr) for epoch in range(epochs): for step, batch in tqdm(enumerate(dl), total=len(dl)): # Prepare the input images clean_images = batch.to(device) bs = clean_images.shape[0] # Sample a random timestep for each image timesteps = torch.randint( 0, pipe.scheduler.num_train_timesteps, (bs,), device=clean_images.device ).long() # Add noise to the clean images according to the noise magnitude at each timestep noise = torch.randn(clean_images.shape).to(clean_images.device) noisy_images = pipe.scheduler.add_noise(clean_images, noise, timesteps) # Get the model prediction noise_pred = pipe.unet(noisy_images, timesteps, return_dict=False)[0] # Calculate the loss loss = F.mse_loss(noise_pred, noise) loss.backward() # Update the model parameters with the optimizer optimizer.step() optimizer.zero_grad() ``` ``` # OR: Load the version I trained earlier: pipe = DiffusionPipeline.from_pretrained("johnowhitaker/Electronic_test").to(device) ``` ``` output = pipe() display(output.images[0]) display(Audio(output.audios[0], rate=22050)) ``` ``` # Make a longer sample by passing in a starting noise tensor with a different shape noise = torch.randn(1, 1, pipe.unet.sample_size[0],pipe.unet.sample_size[1]*4).to(device) output = pipe(noise=noise) display(output.images[0]) display(Audio(output.audios[0], rate=22050)) ``` Not the most amazing-sounding outputs, but it's a start :) Explore tweaking the learning rate and number of epochs, and share your best results on Discord so we can improve together! Some things to consider - We're working with 256px square spectrogram images which limits our batch size. Can you recover audio of sufficient quality from a 128x128 spectrogram? - In place of random image augmentation we're picking different slices of the audio clip each time, but could this be improved with some different kinds of augmentation when training for many epochs? - How else might we use this to generate longer clips? Perhaps you could generate a 5s starting clip and then use inpainting-inspired ideas to continue to generate additional segments of audio that follow on from the initial clip... - What is the equivalent of image-to-image in this spectrogram diffusion context? 
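Before pushing anything to the Hub, you may also want to keep one or two of the clips you just generated as regular audio files rather than only listening to them in the notebook. The snippet below is a small optional sketch rather than part of the original walkthrough: it assumes the `soundfile` package is installed (`pip install soundfile`), and the output filename is just a placeholder - `scipy.io.wavfile.write` would work equally well.

```
# Write the most recent pipeline output to a WAV file (optional).
# `output` and `pipe` come from the cells above; the sample rate must match
# the one the pipeline's mel component uses (22050 Hz here).
import soundfile as sf

audio = output.audios[0].flatten()  # (1, n_samples) -> (n_samples,)
sf.write("generated_clip.wav", audio, samplerate=pipe.mel.get_sample_rate())
```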
## Push to Hub Once you're happy with your model, you can save it and push it to the hub for others to enjoy: ``` from huggingface_hub import get_full_repo_name, HfApi, create_repo, ModelCard ``` ``` # Pick a name for the model model_name = "audio-diffusion-electronic" hub_model_id = get_full_repo_name(model_name) ``` ``` # Save the pipeline locally pipe.save_pretrained(model_name) ``` ``` # Inspect the folder contents !ls {model_name} ``` ``` mel model_index.json scheduler unet ``` ``` # Create a repository create_repo(hub_model_id) ``` ``` # Upload the files api = HfApi() api.upload_folder( folder_path=f"{model_name}/scheduler", path_in_repo="scheduler", repo_id=hub_model_id ) api.upload_folder( folder_path=f"{model_name}/mel", path_in_repo="mel", repo_id=hub_model_id ) api.upload_folder(folder_path=f"{model_name}/unet", path_in_repo="unet", repo_id=hub_model_id) api.upload_file( path_or_fileobj=f"{model_name}/model_index.json", path_in_repo="model_index.json", repo_id=hub_model_id, ) ``` ``` # Push a model card content = f""" --- license: mit tags: - pytorch - diffusers - unconditional-audio-generation - diffusion-models-class --- # Model Card for Unit 4 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This model is a diffusion model for unconditional audio generation of music in the genre {chosen_genre} ## Usage ```python from IPython.display import Audio from diffusers import DiffusionPipeline pipe = DiffusionPipeline.from_pretrained("{hub_model_id}") output = pipe() display(output.images[0]) display(Audio(output.audios[0], rate=pipe.mel.get_sample_rate())) ``` """ card = ModelCard(content) card.push_to_hub(hub_model_id) ``` ## Conclusion This notebook has hopefully given you a small taste of the potential of audio generation. Check out some of the references linked from the introduction to this unit to see some fancier methods and the astounding samples they can create!
diffusion-models-class/units/en/unit4/3.mdx/0
{ "file_path": "diffusion-models-class/units/en/unit4/3.mdx", "repo_id": "diffusion-models-class", "token_count": 4647 }
159
<jupyter_start><jupyter_text>Models (TensorFlow) Install the 🤗 *Transformers* library to run this *notebook*.<jupyter_code>!pip install transformers[sentencepiece] from transformers import CamembertConfig, TFCamembertModel # Build the configuration config = CamembertConfig() # Build the model from the configuration model = TFCamembertModel(config) print(config) from transformers import CamembertConfig, TFCamembertModel config = CamembertConfig() model = TFCamembertModel(config) # The model is randomly initialized! from transformers import TFCamembertModel model = TFCamembertModel.from_pretrained("camembert-base") model.save_pretrained("directory_on_my_computer") sequences = ["Hello!", "Cool.", "Nice!"] from transformers import CamembertTokenizer tokenizer = CamembertTokenizer.from_pretrained("camembert-base") encoded_sequences = tokenizer(sequences, padding=True) encoded_sequences import tensorflow as tf model_inputs = tf.constant(encoded_sequences["input_ids"]) output = model(model_inputs)<jupyter_output><empty_output>
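A small illustrative follow-up (not in the original notebook): the weights saved above can be reloaded from the local directory with `from_pretrained`, exactly as if they came from the Hub.

```python
from transformers import TFCamembertModel

# Reload the model that was saved locally above
reloaded_model = TFCamembertModel.from_pretrained("directory_on_my_computer")
```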
notebooks/course/fr/chapter2/section3_tf.ipynb/0
{ "file_path": "notebooks/course/fr/chapter2/section3_tf.ipynb", "repo_id": "notebooks", "token_count": 351 }
160
<jupyter_start><jupyter_text>Sharing pretrained models (PyTorch) Install the 🤗 Transformers library to run this *notebook*.<jupyter_code>!pip install datasets transformers[sentencepiece] !apt install git-lfs<jupyter_output><empty_output><jupyter_text>You will need to configure git; set your email and your name in the following cell.<jupyter_code>!git config --global user.email "[email protected]" !git config --global user.name "Your Name"<jupyter_output><empty_output><jupyter_text>You will also need to be logged in to the Hugging Face Hub. Run the following and enter your credentials.<jupyter_code>from huggingface_hub import notebook_login notebook_login() from transformers import TrainingArguments training_args = TrainingArguments( "camembert-base-finetuned-paws-x", save_strategy="epoch", push_to_hub=True ) from transformers import AutoModelForMaskedLM, AutoTokenizer checkpoint = "camembert-base" model = AutoModelForMaskedLM.from_pretrained(checkpoint) tokenizer = AutoTokenizer.from_pretrained(checkpoint) model.push_to_hub("dummy-model") tokenizer.push_to_hub("dummy-model") tokenizer.push_to_hub("dummy-model", organization="huggingface") tokenizer.push_to_hub("dummy-model", organization="huggingface", use_auth_token="<TOKEN>") from huggingface_hub import ( # User management login, logout, whoami, # Repository creation and management create_repo, delete_repo, update_repo_visibility, # Some methods to retrieve/change information about the content list_models, list_datasets, list_metrics, list_repo_files, upload_file, delete_file, ) from huggingface_hub import create_repo create_repo("dummy-model") from huggingface_hub import create_repo create_repo("dummy-model", organization="huggingface") from huggingface_hub import upload_file upload_file( "<path_to_file>/config.json", path_in_repo="config.json", repo_id="<namespace>/dummy-model", ) from huggingface_hub import Repository repo = Repository("<path_to_dummy_folder>", clone_from="<namespace>/dummy-model") repo.git_pull() repo.git_add() repo.git_commit() repo.git_push() repo.git_tag() repo.git_pull() model.save_pretrained("<path_to_dummy_folder>") tokenizer.save_pretrained("<path_to_dummy_folder>") repo.git_add() repo.git_commit("Add model and tokenizer files") repo.git_push() from transformers import AutoModelForMaskedLM, AutoTokenizer checkpoint = "camembert-base" model = AutoModelForMaskedLM.from_pretrained(checkpoint) tokenizer = AutoTokenizer.from_pretrained(checkpoint) # Do whatever you want with the model: train it, fine-tune it... model.save_pretrained("<path_to_dummy_folder>") tokenizer.save_pretrained("<path_to_dummy_folder>")<jupyter_output><empty_output>
notebooks/course/fr/chapter4/section3_pt.ipynb/0
{ "file_path": "notebooks/course/fr/chapter4/section3_pt.ipynb", "repo_id": "notebooks", "token_count": 1050 }
161
<jupyter_start><jupyter_text>WordPiece tokenization. No French model uses WordPiece; the CamemBERT model used here relies on SentencePiece, but the WordPiece algorithm itself can still be illustrated on a French corpus. Install the 🤗 *Transformers* and 🤗 *Datasets* libraries to run this *notebook*.<jupyter_code>!pip install datasets transformers[sentencepiece] corpus = [ "C'est le cours d'Hugging Face.", "Ce chapitre traite de la tokenisation.", "Cette section présente plusieurs algorithmes de tokenizer.", "Avec un peu de chance, vous serez en mesure de comprendre comment ils sont entraînés et génèrent des tokens.", ] from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("camembert-base") from collections import defaultdict word_freqs = defaultdict(int) for text in corpus: words_with_offsets = tokenizer.backend_tokenizer.pre_tokenizer.pre_tokenize_str(text) new_words = [word for word, offset in words_with_offsets] for word in new_words: word_freqs[word] += 1 word_freqs alphabet = [] for word in word_freqs.keys(): if word[0] not in alphabet: alphabet.append(word[0]) for letter in word[1:]: if f"##{letter}" not in alphabet: alphabet.append(f"##{letter}") alphabet.sort() alphabet print(alphabet) vocab = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"] + alphabet.copy() splits = { word: [c if i == 0 else f"##{c}" for i, c in enumerate(word)] for word in word_freqs.keys() } def compute_pair_scores(splits): letter_freqs = defaultdict(int) pair_freqs = defaultdict(int) for word, freq in word_freqs.items(): split = splits[word] if len(split) == 1: letter_freqs[split[0]] += freq continue for i in range(len(split) - 1): pair = (split[i], split[i + 1]) letter_freqs[split[i]] += freq pair_freqs[pair] += freq letter_freqs[split[-1]] += freq scores = { pair: freq / (letter_freqs[pair[0]] * letter_freqs[pair[1]]) for pair, freq in pair_freqs.items() } return scores pair_scores = compute_pair_scores(splits) for i, key in enumerate(pair_scores.keys()): print(f"{key}: {pair_scores[key]}") if i >= 5: break best_pair = "" max_score = None for pair, score in pair_scores.items(): if max_score is None or max_score < score: best_pair = pair max_score = score print(best_pair, max_score) vocab.append("ab") def merge_pair(a, b, splits): for word in word_freqs: split = splits[word] if len(split) == 1: continue i = 0 while i < len(split) - 1: if split[i] == a and split[i + 1] == b: merge = a + b[2:] if b.startswith("##") else a + b split = split[:i] + [merge] + split[i + 2 :] else: i += 1 splits[word] = split return splits splits = merge_pair("a", "##b", splits) splits vocab_size = 70 while len(vocab) < vocab_size: scores = compute_pair_scores(splits) best_pair, max_score = "", None for pair, score in scores.items(): if max_score is None or max_score < score: best_pair = pair max_score = score splits = merge_pair(*best_pair, splits) new_token = ( best_pair[0] + best_pair[1][2:] if best_pair[1].startswith("##") else best_pair[0] + best_pair[1] ) vocab.append(new_token) print(vocab) def encode_word(word): tokens = [] while len(word) > 0: i = len(word) while i > 0 and word[:i] not in vocab: i -= 1 if i == 0: return ["[UNK]"] tokens.append(word[:i]) word = word[i:] if len(word) > 0: word = f"##{word}" return tokens print(encode_word("Hugging")) print(encode_word("HOgging")) def tokenize(text): pre_tokenize_result = tokenizer._tokenizer.pre_tokenizer.pre_tokenize_str(text) pre_tokenized_text = [word for word, offset in pre_tokenize_result] encoded_words = [encode_word(word) for word in pre_tokenized_text] return sum(encoded_words, []) tokenize("C'est le cours 
d'Hugging Face !")<jupyter_output><empty_output>
notebooks/course/fr/chapter6/section6.ipynb/0
{ "file_path": "notebooks/course/fr/chapter6/section6.ipynb", "repo_id": "notebooks", "token_count": 1867 }
162
<jupyter_start><jupyter_text>*Introducing Hugging Face's new library for diffusion models*Diffusion models have proven themselves very effective in artificial synthesis, even beating GANs for images. Because of that, they gained traction in the machine learning community and play an important role in systems like [DALL-E 2](https://openai.com/dall-e-2/) or [Imagen](https://imagen.research.google/) that generate photorealistic images from text prompts.While the most prolific successes of diffusion models have been in the computer vision community, these models have also achieved remarkable results in other domains, such as:- [video generation](https://video-diffusion.github.io/),- [audio synthesis](https://diffwave-demo.github.io/),- [reinforcement learning](https://diffusion-planning.github.io/),- and more.However, most of the recent research on diffusion models, *e.g.* DALL-E 2 and Imagen, is unfortunately not accessible to the broader machine learning community and typically remains behind closed doors.Here comes the `diffusers` library with the goals to:1. gather recent diffusion models from independent repositories in a single and **long-term maintained** project that is built by and for the **community**,2. reproduce high-impact machine learning systems such as DALL-E 2 and Imagen in a manner that is accessible for the public, and3. create an easy-to-use API that enables one to train their own models or re-use checkpoints from other repositories for inference. This notebook will walk you through the most important features of `diffusers`.We assume that the reader has a minimal understanding of how diffusion models function. To refresh some theory as well as terminology, we recommend reading/skimming the following blog posts: - Lilian Weng's, OpenAI, [introductory post](https://lilianweng.github.io/posts/2021-07-11-diffusion-models/) - Yang Song's, Stanford, [introductory post](https://yang-song.github.io/blog/2021/score/) - The Annotated Diffusion Model [post](https://huggingface.co/blog/annotated-diffusion)Or papers:- The original paper proposing [thermodynamics for unsupervised learning](https://arxiv.org/abs/1503.03585),- The paper for a popular diffusion model, [Denoising Diffusion Probabilistic Models DDPM](https://arxiv.org/abs/2006.11239), or- A recent paper covering [tradeoffs in diffusion models](https://arxiv.org/abs/2206.00364) SummaryThis post is designed to showcase the core API of `diffusers`, which is divided into three components:1. **Pipelines**: high-level classes designed to rapidly generate samples from popular trained diffusion models in a user-friendly fashion.2. **Models**: popular architectures for training new diffusion models, *e.g.* [UNet](https://arxiv.org/abs/1505.04597).3. **Schedulers**: various techniques for generating images from noise during *inference* as well as for generating noisy images for *training*.**Note**: This notebook focuses only on **inference**. If you want to get a more hands-on guide on **training** diffusion models, please have a look at the [*Training with Diffusers*](https://colab.research.google.com/gist/anton-l/f3a8206dae4125b93f05b1f5f703191d/diffusers_training_example.ipynb) notebook. 
Install `diffusers`<jupyter_code>!pip install diffusers==0.11.1<jupyter_output>Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/ Collecting diffusers==0.11.1 Downloading diffusers-0.11.1-py3-none-any.whl (153 kB) |████████████████████████████████| 153 kB 16.0 MB/s [?25hRequirement already satisfied: filelock in /usr/local/lib/python3.7/dist-packages (from diffusers==0.11.1) (3.8.0) Requirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from diffusers==0.11.1) (1.21.6) Collecting huggingface-hub>=0.8.1 Downloading huggingface_hub-0.9.1-py3-none-any.whl (120 kB) |████████████████████████████████| 120 kB 64.2 MB/s [?25hRequirement already satisfied: importlib-metadata in /usr/local/lib/python3.7/dist-packages (from diffusers==0.11.1) (4.12.0) Requirement already satisfied: torch>=1.4 in /usr/local/lib/python3.7/dist-packages (from diffusers==0.11.1) (1.12.1+cu113) Requirement already satisfied: Pillow in /usr/local/lib/python3.7/dist-packages (from diffusers==0.11.1) (7[...]<jupyter_text>Overview One goal of the diffusers library is to make diffusion models accessible to a wide range of deep learning practitioners. With this in mind, we aimed at building a library that is **easy to use**, **intuitive to understand**, and **easy to contribute to**.As a quick recap, diffusion models are machine learning systems that are trained to *denoise* random gaussian noise step by step, to get to a sample of interest, such as an *image*. The underlying model, often a neural network, is trained to predict a way to slightly denoise the image in each step. After a certain number of steps, a sample is obtained.The process is illustrated by the following design: The architecture of the neural network, referred to as **model**, commonly follows the UNet architecture as proposed in [this paper](https://arxiv.org/abs/1505.04597) and improved upon in the Pixel++ paper.No worries if you don't understand everything. Some of the highlights of the architecture are:* this model predicts images of the same size as the input* the model makes the input image go through several blocks of ResNet layers, each of which halves the image size by 2* then through the same number of blocks that upsample it again.* skip connections link features on the downsample path to corresponding layers in the upsample path. The diffusion process consists of taking random noise of the size of the desired output and passing it through the model several times. The process ends after a given number of steps, and the output image should represent a sample according to the training data distribution of the model, for instance an image of a butterfly. During training we show many samples of a given distribution, such as images of butterflies. After training, the model will be able to process random noise to generate similar butterfly images.Without going into too much detail, the model is usually not trained to directly predict a slightly less noisy image, but rather to predict the "noise residual" which is the difference between a less noisy image and the input image (for a diffusion model called "DDPM") or, similarly, the gradient between the two time steps (like the diffusion model called "Score VE").To do the denoising process, a specific noise scheduling algorithm is thus necessary to "wrap" the model and define how many diffusion steps are needed for inference as well as how to *compute* a less noisy image from the model's output. 
Here is where the different **schedulers** of the diffusers library come into play.Finally, a **pipeline** groups together a **model** and a **scheduler** and makes it easy for an end-user to run a full denoising loop process. We'll start with the pipelines and dive deeper into their implementation before taking a closer look at models and schedulers. Core API Pipelines Let's begin by importing a pipeline. We'll use the `google/ddpm-celebahq-256` model, built in a collaboration between Google and U.C. Berkeley. It's a model following the [Denoising Diffusion Probabilistic Models (DDPM) algorithm](https://arxiv.org/abs/2006.11239) trained on a dataset of celebrity images. We can import the `DDPMPipeline`, which will allow you to do inference with a couple of lines of code:<jupyter_code>from diffusers import DDPMPipeline<jupyter_output><empty_output><jupyter_text>The `from_pretrained()` method allows downloading the model and its configuration from [the Hugging Face Hub](https://huggingface.co/google/ddpm-celebahq-256), a repository of over 60,000 models shared by the community.<jupyter_code>image_pipe = DDPMPipeline.from_pretrained("google/ddpm-celebahq-256") image_pipe.to("cuda")<jupyter_output><empty_output><jupyter_text>To generate an image, we simply run the pipeline and don't even need to give it any input; it will generate a random initial noise sample and then iterate the diffusion process.The pipeline returns as output a dictionary with a generated `sample` of interest. This will typically take 2-3 minutes on Google Colab:<jupyter_code>images = image_pipe().images<jupyter_output><empty_output><jupyter_text>Let's take a look 🙂<jupyter_code>images[0]<jupyter_output><empty_output><jupyter_text>Looks pretty good!Now, let's try to understand a bit better what was going on under the hood. Let's see what the pipeline is made of:<jupyter_code>image_pipe<jupyter_output><empty_output><jupyter_text>We can see inside the pipeline a scheduler and a UNet model. Let's have a closer look at them and what this pipeline just did behind the scenes. ModelsInstances of the model class are neural networks that take a noisy `sample` as well as a `timestep` as inputs to predict a less noisy output `sample`. Let's load a pre-trained model and play around with it to understand the model API!We'll load a simple unconditional image generation model of type `UNet2DModel` which was released with the [DDPM Paper](https://arxiv.org/abs/2006.11239) and for instance take a look at another checkpoint trained on church images: [`google/ddpm-church-256`](https://huggingface.co/google/ddpm-church-256).Similarly to what we've seen for the pipeline class, we can load the model configuration and weights with one line, using the `from_pretrained()` method that you may be familiar with if you've played with the `transformers` library:<jupyter_code>from diffusers import UNet2DModel repo_id = "google/ddpm-church-256" model = UNet2DModel.from_pretrained(repo_id)<jupyter_output><empty_output><jupyter_text>The `from_pretrained()` method caches the model weights locally, so if you execute the cell above a second time, it will go much faster. The model is a pure PyTorch `torch.nn.Module` class which you can see when printing out `model`.<jupyter_code>model<jupyter_output><empty_output><jupyter_text>Now let's take a look at the model's configuration. 
The configuration can be accessed via the `config` attribute and shows all the necessary parameters to define the model architecture (and only those).<jupyter_code>model.config<jupyter_output><empty_output><jupyter_text>As you can see, the model config is a frozen dictionary. This is to enforce that the configuration will **only** be used to define the model architecture at instantiation time and not for any attributes that can be changed during inference.A couple of important config parameters are:- `sample_size`: defines the `height` and `width` dimension of the input sample.- `in_channels`: defines the number of input channels of the input sample.- `down_block_types` and `up_block_types`: define the type of down- and upsampling blocks that are used to create the UNet architecture as was seen in the figure at the beginning of this notebook.- `block_out_channels`: defines the number of output channels of the downsampling blocks, also used in reversed order for the number of input channels of the upsampling blocks.- `layers_per_block`: defines how many ResNet blocks are present in each UNet block.Knowing how a UNet config looks like, you can quickly try to instantiate the exact same model architecture with random weights. To do so, let's pass the config as an unpacked dict to the `UNet2DModel` class.<jupyter_code>model_random = UNet2DModel(**model.config)<jupyter_output><empty_output><jupyter_text>Cool, the above created a randomly initialized model with the same config as the previous one. If you want to save the model you just created, you can use the `save_pretrained()` method, which saves both the model weights as well as the model config in the provided folder.<jupyter_code>model_random.save_pretrained("my_model")<jupyter_output><empty_output><jupyter_text>Let's take a look at what files were saved in `my_model`.<jupyter_code>!ls my_model<jupyter_output>config.json diffusion_pytorch_model.bin<jupyter_text>`diffusion_pytorch_model.bin` is a binary PyTorch file that stores the model weights and `config.json` stores the model's configuration. If you want to reuse the model, you can simply use the `from_pretrained()` method again, as it loads local checkpoints as well as those present on the Hub.<jupyter_code>model_random = UNet2DModel.from_pretrained("my_model")<jupyter_output><empty_output><jupyter_text>Coming back to the actually trained model, let's now see how you can use the model for inference. First, you need a random gaussian sample in the shape of an image (`batch_size` $\times$ `in_channels` $\times$ `sample_size` $\times$ `sample_size`). We have a `batch` axis because a model can receive multiple random noises, a `channel` axis because each one consists of multiple channels (such as red-green-blue), and finally `sample_size` corresponds to the height and width.<jupyter_code>import torch torch.manual_seed(0) noisy_sample = torch.randn( 1, model.config.in_channels, model.config.sample_size, model.config.sample_size ) noisy_sample.shape<jupyter_output><empty_output><jupyter_text>Time to do the inference!You can pass the noisy sample alongside a `timestep` through the model. 
The timestep is important to cue the model with "how noisy" the input image is (more noisy in the beginning of the process, less noisy at the end), so the model knows if it's closer to the start or the end of the diffusion process.As explained in the introduction, the model predicts either the slightly less noisy image, the difference between the slightly less noisy image and the input image or even something else. It is important to carefully read through the [model card](https://huggingface.co/google/ddpm-church-256) to know what the model has been trained on. In this case, the model predicts the noise residual (difference between the slightly less noisy image and the input image).<jupyter_code>with torch.no_grad(): noisy_residual = model(sample=noisy_sample, timestep=2).sample<jupyter_output><empty_output><jupyter_text>The predicted `noisy_residual` has the exact same shape as the input and we use it to compute a slightly less noised image. Let's confirm the output shapes match:<jupyter_code>noisy_residual.shape<jupyter_output><empty_output><jupyter_text>Great.Now to summarize, **models**, such as `UNet2DModel` (PyTorch modules) are parameterized neural networks trained to *predict* a slightly less noisy image or residual. They are defined by their `.config` and can be loaded from the Hub as well as saved and loaded locally. The next step is learning how to combine this **model** with the correct **scheduler** to be able to actually generate images. Schedulers**Schedulers** are algorithms wrapped into a Python class. They define the noise schedule which is used to add noise to the model during training, and also define the algorithm to *compute* the slightly less noisy sample given the model output (here `noisy_residual`). This notebook focuses only on how to use *scheduler* classes for inference. You can check out this notebook to see how to use *schedulers* for training.It is important to stress here that while *models* have trainable weights, *schedulers* are usually *parameter-free* (in the sense they have no trainable weights) and simply define the algorithm to compute the slightly less noisy sample. Schedulers thus don't inherit from `torch.nn.Module`, but like models they are instantiated by a configuration. To download a scheduler config from the Hub, you can make use of the `from_config()` method to load a configuration and instantiate a scheduler.Let's use `DDPMScheduler`, the denoising algorithm proposed in the [DDPM Paper](https://arxiv.org/abs/2006.11239).<jupyter_code>from diffusers import DDPMScheduler scheduler = DDPMScheduler.from_config(repo_id)<jupyter_output><empty_output><jupyter_text>Let's also take a look at the config here.<jupyter_code>scheduler.config<jupyter_output><empty_output><jupyter_text>Different schedulers are usually defined by different parameters. To better understand what the parameters are used for exactly, the reader is advised to directly look into the respective scheduler files under `src/diffusers/schedulers/`, such as the [`src/diffusers/schedulers/scheduling_ddpm.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/schedulers/scheduling_ddpm.py) file. Here are the most important ones:- `num_train_timesteps` defines the length of the denoising process, e.g. 
how many timesteps are needed to process random gaussian noise into a data sample.- `beta_schedule` defines the type of noise schedule that shall be used for inference and training- `beta_start` and `beta_end` define the smallest noise value and highest noise value of the schedule.Like the *models*, *schedulers* can be saved and loaded with `save_config()` and `from_config()`.<jupyter_code>scheduler.save_config("my_scheduler") new_scheduler = DDPMScheduler.from_config("my_scheduler")<jupyter_output><empty_output><jupyter_text>All schedulers provide one or multiple `step()` methods that can be used to compute the slightly less noisy image. The `step()` method may vary from one scheduler to another, but normally expects at least the model output, the `timestep` and the current `noisy_sample`.Note that the `step()` method is somewhat of a black box function that "just works". If you are keen to better understand how exactly the previous noisy sample is computed as defined in the original paper of the scheduler, you should take a look at the actual code, *e.g.* [click here](https://github.com/huggingface/diffusers/blob/936cd08488260a9df3548d66628b83bc7f26bd9e/src/diffusers/schedulers/scheduling_ddpm.py#L130) for DDPM, which contains comments and references to the original paper. Let's give it a try using the model output from the previous section.<jupyter_code>less_noisy_sample = scheduler.step( model_output=noisy_residual, timestep=2, sample=noisy_sample ).prev_sample less_noisy_sample.shape<jupyter_output><empty_output><jupyter_text>You can see that the computed sample has the exact same shape as the model input, meaning that you are ready to pass it to the model again in a next step.Let's now bring it all together and actually define the denoising loop. This loop prints out the (less and less) noisy samples along the way for better visualization of the denoising process. Let's define a display function that takes care of post-processing the denoised image, converting it to a `PIL.Image` and displaying it.<jupyter_code>import PIL.Image import numpy as np def display_sample(sample, i): image_processed = sample.cpu().permute(0, 2, 3, 1) image_processed = (image_processed + 1.0) * 127.5 image_processed = image_processed.numpy().astype(np.uint8) image_pil = PIL.Image.fromarray(image_processed[0]) display(f"Image at step {i}") display(image_pil)<jupyter_output><empty_output><jupyter_text>Before defining the loop, let's move the input and model to the GPU to speed up the denoising process a bit.<jupyter_code>model.to("cuda") noisy_sample = noisy_sample.to("cuda")<jupyter_output><empty_output><jupyter_text>Time to finally define the denoising loop! It is rather straightforward for DDPM. 1. Predict the residual of the less noisy sample with the model.2. Compute the less noisy sample with the scheduler.Additionally, at every 50th step this will display the progress.It's important to note here that you loop over `scheduler.timesteps` which is a tensor defining the sequence of timesteps over which to iterate during the denoising process. Usually, the denoising process goes in decreasing order of timesteps, so from the total number of timesteps (here 1000) to 0.Depending on your GPU this might take up to a minute - enough time to reflect on everything you learned so far while you can watch a church being built from nothing but noise ⛪.<jupyter_code>import tqdm sample = noisy_sample for i, t in enumerate(tqdm.tqdm(scheduler.timesteps)): # 1. 
predict noise residual with torch.no_grad(): residual = model(sample, t).sample # 2. compute less noisy image and set x_t -> x_t-1 sample = scheduler.step(residual, t, sample).prev_sample # 3. optionally look at image if (i + 1) % 50 == 0: display_sample(sample, i + 1)<jupyter_output>5%|▍ | 49/1000 [00:02<00:44, 21.60it/s]<jupyter_text>You can see that it takes quite some time to see a somewhat meaningful shape - only after *ca.* 800 steps. While the quality of the image is actually quite good, you might want to speed up the image generation.To do so, you can try replacing the DDPM scheduler with the [DDIM](https://arxiv.org/abs/2010.02502) scheduler, which keeps high generation quality at a significantly sped-up generation time.**Exchanging schedulers**: one of the exciting prospects of a diffusion model library is that different scheduling protocols can work with different models, but there is no one-size-fits-all solution! In this case, DDIM works as a swap for DDPM, but this is not universal (and represents an interesting research problem). The DDPM and DDIM schedulers more or less share the same configuration, so you can load a DDIM scheduler from a DDPM scheduler. (A short sketch of plugging the DDIM scheduler back into the full pipeline follows at the end of this section.)<jupyter_code>from diffusers import DDIMScheduler scheduler = DDIMScheduler.from_config(repo_id)<jupyter_output>{'timestep_values', 'set_alpha_to_one'} was not found in config. Values will be initialized to default values.<jupyter_text>The DDIM scheduler allows the user to define how many denoising steps should be run at inference via the `set_timesteps` method. The DDPM scheduler runs by default 1000 denoising steps. Let's significantly reduce this number to just 50 inference steps for DDIM.<jupyter_code>scheduler.set_timesteps(num_inference_steps=50)<jupyter_output><empty_output><jupyter_text>And you can run the same loop as before - only that you are now making use of the much faster DDIM scheduler.<jupyter_code>import tqdm sample = noisy_sample for i, t in enumerate(tqdm.tqdm(scheduler.timesteps)): # 1. predict noise residual with torch.no_grad(): residual = model(sample, t).sample # 2. compute previous image and set x_t -> x_t-1 sample = scheduler.step(residual, t, sample).prev_sample # 3. optionally look at image if (i + 1) % 10 == 0: display_sample(sample, i + 1)<jupyter_output>18%|█▊ | 9/50 [00:00<00:01, 22.58it/s]
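As promised above, here is a short, purely illustrative sketch (not part of the original notebook) of plugging a faster DDIM scheduler back into the full `image_pipe` pipeline from the beginning of this notebook, assuming that pipeline is still in memory:

```python
from diffusers import DDIMScheduler

# Build a DDIM scheduler from the pipeline's existing noise configuration,
# swap it in, and sample with far fewer denoising steps.
image_pipe.scheduler = DDIMScheduler.from_config(image_pipe.scheduler.config)
images = image_pipe(num_inference_steps=50).images
images[0]
```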
notebooks/diffusers/diffusers_intro.ipynb/0
{ "file_path": "notebooks/diffusers/diffusers_intro.ipynb", "repo_id": "notebooks", "token_count": 6228 }
163
<jupyter_start><jupyter_text>Stable Diffusion Textual Inversion - Concept Library navigation and usageNavigate through the [public library of concepts](https://huggingface.co/sd-concepts-library) and use Stable Diffusion with custom concepts. 🤗 Hugging Face [🧨 Diffusers library](https://github.com/huggingface/diffusers). _By using just 3-5 images new concepts can be taught to Stable Diffusion and the model personalized on your own images_ If you would like to teach Stable Diffusion your own concepts, check out the [training notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb) Initial setup<jupyter_code>#@title Install the required libs !pip install -qq diffusers==0.4.1 transformers ftfy gradio wget #@title Login to the Hugging Face Hub #@markdown If you haven't yet, [you have to first acknowledge and agree to the model LICENSE before using it](https://huggingface.co/CompVis/stable-diffusion-v1-4) from huggingface_hub import notebook_login notebook_login() #@title Prepare the Concepts Library to be used import requests import os import gradio as gr import wget import torch from diffusers import StableDiffusionPipeline from huggingface_hub import HfApi from transformers import CLIPTextModel, CLIPTokenizer from tqdm.notebook import tqdm api = HfApi() models_list = api.list_models(author="sd-concepts-library") models = [] pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16).to("cuda") def load_learned_embed_in_clip(learned_embeds_path, text_encoder, tokenizer, token=None): loaded_learned_embeds = torch.load(learned_embeds_path, map_location="cpu") # separate token and the embeds trained_token = list(loaded_learned_embeds.keys())[0] embeds = loaded_learned_embeds[trained_token] # cast to dtype of text_encoder dtype = text_encoder.get_input_embeddings().weight.dtype embeds.to(dtype) # add the token in tokenizer token = token if token is not None else trained_token num_added_tokens = tokenizer.add_tokens(token) i = 1 while(num_added_tokens == 0): print(f"The tokenizer already contains the token {token}.") token = f"{token[:-1]}-{i}>" print(f"Attempting to add the token {token}.") num_added_tokens = tokenizer.add_tokens(token) i+=1 # resize the token embeddings text_encoder.resize_token_embeddings(len(tokenizer)) # get the id for the token and assign the embeds token_id = tokenizer.convert_tokens_to_ids(token) text_encoder.get_input_embeddings().weight.data[token_id] = embeds return token print("Setting up the public library") for model in tqdm(models_list): model_content = {} model_id = model.modelId model_content["id"] = model_id embeds_url = f"https://huggingface.co/{model_id}/resolve/main/learned_embeds.bin" os.makedirs(model_id,exist_ok = True) if not os.path.exists(f"{model_id}/learned_embeds.bin"): try: wget.download(embeds_url, out=model_id) except: continue token_identifier = f"https://huggingface.co/{model_id}/raw/main/token_identifier.txt" response = requests.get(token_identifier) token_name = response.text concept_type = f"https://huggingface.co/{model_id}/raw/main/type_of_concept.txt" response = requests.get(concept_type) concept_name = response.text model_content["concept_type"] = concept_name images = [] for i in range(4): url = f"https://huggingface.co/{model_id}/resolve/main/concept_images/{i}.jpeg" image_download = requests.get(url) url_code = image_download.status_code if(url_code == 200): file = open(f"{model_id}/{i}.jpeg", "wb") ## 
Creates the file for image file.write(image_download.content) ## Saves file content file.close() images.append(f"{model_id}/{i}.jpeg") model_content["images"] = images learned_token = load_learned_embed_in_clip(f"{model_id}/learned_embeds.bin", pipe.text_encoder, pipe.tokenizer, token_name) model_content["token"] = learned_token models.append(model_content)<jupyter_output><empty_output><jupyter_text>Go!<jupyter_code>#@title Run the app to navigate around [the Library](https://huggingface.co/sd-concepts-library) #@markdown Click the `Running on public URL:` result to run the Gradio app SELECT_LABEL = "Select concept" def title_block(title, id): return gr.Markdown(f"### [`{title}`](https://huggingface.co/{id})") def image_block(image_list, concept_type): return gr.Gallery( label=concept_type, value=image_list, elem_id="gallery" ).style(grid=[2], height="auto") def checkbox_block(): checkbox = gr.Checkbox(label=SELECT_LABEL).style(container=False) return checkbox def infer(text): images_list = pipe( text, num_images_per_prompt=2, num_inference_steps=50, guidance_scale=7.5 ) output_images = [] for i, image in enumerate(images_list["sample"]): output_images.append(image) return output_images css = ''' .gradio-container {font-family: 'IBM Plex Sans', sans-serif} #top_title{margin-bottom: .5em} #top_title h2{margin-bottom: 0; text-align: center} #main_row{flex-wrap: wrap; gap: 1em; max-height: calc(100vh - 16em); overflow-y: scroll; flex-direction: row} @media (min-width: 768px){#main_row > div{flex: 1 1 32%; margin-left: 0 !important}} .gr-prose code::before, .gr-prose code::after {content: "" !important} ::-webkit-scrollbar {width: 10px} ::-webkit-scrollbar-track {background: #f1f1f1} ::-webkit-scrollbar-thumb {background: #888} ::-webkit-scrollbar-thumb:hover {background: #555} .gr-button {white-space: nowrap} .gr-button:focus { border-color: rgb(147 197 253 / var(--tw-border-opacity)); outline: none; box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000); --tw-border-opacity: 1; --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color); --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color); --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity)); --tw-ring-opacity: .5; } #prompt_input{flex: 1 3 auto} #prompt_area{margin-bottom: .75em} #prompt_area > div:first-child{flex: 1 3 auto} ''' examples = ["a <cat-toy> in <madhubani-art> style", "a mecha robot in <line-art> style", "a piano being played by <bonzi>"] with gr.Blocks(css=css) as demo: state = gr.Variable({ 'selected': -1 }) state = {} def update_state(i): global checkbox_states if(checkbox_states[i]): checkbox_states[i] = False state[i] = False else: state[i] = True checkbox_states[i] = True gr.HTML(''' <div style="text-align: center; max-width: 720px; margin: 0 auto;"> <div style=" display: inline-flex; align-items: center; gap: 0.8rem; font-size: 1.75rem; " > <svg width="0.65em" height="0.65em" viewBox="0 0 115 115" fill="none" xmlns="http://www.w3.org/2000/svg" > <rect width="23" height="23" fill="white"></rect> <rect y="69" width="23" height="23" fill="white"></rect> <rect x="23" width="23" height="23" fill="#AEAEAE"></rect> <rect x="23" y="69" width="23" height="23" fill="#AEAEAE"></rect> <rect x="46" width="23" height="23" fill="white"></rect> <rect x="46" y="69" width="23" height="23" fill="white"></rect> <rect x="69" width="23" height="23" fill="black"></rect> <rect x="69" y="69" width="23" 
height="23" fill="black"></rect> <rect x="92" width="23" height="23" fill="#D9D9D9"></rect> <rect x="92" y="69" width="23" height="23" fill="#AEAEAE"></rect> <rect x="115" y="46" width="23" height="23" fill="white"></rect> <rect x="115" y="115" width="23" height="23" fill="white"></rect> <rect x="115" y="69" width="23" height="23" fill="#D9D9D9"></rect> <rect x="92" y="46" width="23" height="23" fill="#AEAEAE"></rect> <rect x="92" y="115" width="23" height="23" fill="#AEAEAE"></rect> <rect x="92" y="69" width="23" height="23" fill="white"></rect> <rect x="69" y="46" width="23" height="23" fill="white"></rect> <rect x="69" y="115" width="23" height="23" fill="white"></rect> <rect x="69" y="69" width="23" height="23" fill="#D9D9D9"></rect> <rect x="46" y="46" width="23" height="23" fill="black"></rect> <rect x="46" y="115" width="23" height="23" fill="black"></rect> <rect x="46" y="69" width="23" height="23" fill="black"></rect> <rect x="23" y="46" width="23" height="23" fill="#D9D9D9"></rect> <rect x="23" y="115" width="23" height="23" fill="#AEAEAE"></rect> <rect x="23" y="69" width="23" height="23" fill="black"></rect> </svg> <h1 style="font-weight: 900; margin-bottom: 7px;"> Stable Diffusion Conceptualizer </h1> </div> <p style="margin-bottom: 10px; font-size: 94%"> Navigate through community created concepts and styles via Stable Diffusion Textual Inversion and pick yours for inference. To train your own concepts and contribute to the library <a style="text-decoration: underline" href="https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb">check out this notebook</a>. </p> </div> ''') with gr.Row(): with gr.Column(): gr.Markdown(''' ### Textual-Inversion trained [concepts library](https://huggingface.co/sd-concepts-library) navigator ''') with gr.Row(elem_id="main_row"): image_blocks = [] for i, model in enumerate(models): with gr.Box().style(border=None): title_block(model["token"], model["id"]) image_blocks.append(image_block(model["images"], model["concept_type"])) with gr.Box(): with gr.Row(elem_id="prompt_area").style(mobile_collapse=False, equal_height=True): text = gr.Textbox( label="Enter your prompt", placeholder="Enter your prompt", show_label=False, max_lines=1, elem_id="prompt_input" ).style( border=(True, False, True, True), rounded=(True, False, False, True), container=False ) btn = gr.Button("Run",elem_id="run_btn").style( margin=False, rounded=(False, True, True, False) ) with gr.Row().style(): infer_outputs = gr.Gallery(show_label=False).style(grid=[2], height="512px") with gr.Row(): gr.HTML("<p style=\"font-size: 85%;margin-top: .75em\">Prompting may not work as you are used to; <code>objects</code> may need the concept added at the end.</p>") with gr.Row(): gr.Examples(examples=examples, fn=infer, inputs=[text], outputs=infer_outputs, cache_examples=False) checkbox_states = {} inputs = [text] btn.click( infer, inputs=inputs, outputs=infer_outputs ) demo.launch(inline=False, debug=True)<jupyter_output><empty_output>
notebooks/diffusers/stable_diffusion_textual_inversion_library_navigator.ipynb/0
{ "file_path": "notebooks/diffusers/stable_diffusion_textual_inversion_library_navigator.ipynb", "repo_id": "notebooks", "token_count": 5285 }
164
<jupyter_start><jupyter_text>**Fine-tuning for Audio Classification with 🤗 Transformers** This notebook shows how to fine-tune pretrained speech models for audio classification. This notebook is built to run on the **Keyword Spotting** subset of the [SUPERB dataset](https://huggingface.co/datasets/superb) with any speech model checkpoint from the [Model Hub](https://huggingface.co/models?pipeline_tag=automatic-speech-recognition&sort=downloads) as long as that model has a version with a Sequence Classification head (e.g. [Wav2Vec2ForSequenceClassification](https://huggingface.co/transformers/model_doc/wav2vec2.html#wav2vec2forsequenceclassification)). Depending on the model and the GPU you are using, you might need to adjust the batch size to avoid out-of-memory errors. Set those two parameters, then the rest of the notebook should run smoothly:<jupyter_code>model_checkpoint = "facebook/wav2vec2-base" batch_size = 32<jupyter_output><empty_output><jupyter_text>Before we start, let's install both `datasets` and `transformers` from master. Also, we need the `librosa` package to load audio files.<jupyter_code>%%capture !pip install datasets==1.14 !pip install transformers==4.11.3 !pip install librosa<jupyter_output><empty_output><jupyter_text>If you're opening this notebook locally, make sure your environment has the latest version of those libraries installed.To be able to share your model with the community and generate results like the one shown in the picture below via the inference API, there are a few more steps to follow.First you have to store your authentication token from the Hugging Face website (sign up [here](https://huggingface.co/join) if you haven't already!) then execute the following cell and input your username and password:<jupyter_code>from huggingface_hub import notebook_login notebook_login()<jupyter_output><empty_output><jupyter_text>Then you need to install Git-LFS to upload your model checkpoints:<jupyter_code>%%capture !apt install git-lfs<jupyter_output><empty_output><jupyter_text>We also quickly upload some telemetry - this tells us which examples and software versions are getting used so we know where to prioritize our maintenance efforts. We don't collect (or care about) any personally identifiable information, but if you'd prefer not to be counted, feel free to skip this step or delete this cell entirely.<jupyter_code>from transformers.utils import send_example_telemetry send_example_telemetry("audio_classification_notebook", framework="pytorch")<jupyter_output><empty_output><jupyter_text>Fine-tuning a model on an audio classification task In this notebook, we will see how to fine-tune one of the [🤗 Transformers](https://github.com/huggingface/transformers) acoustic models to a Keyword Spotting task of the [SUPERB Benchmark](https://superbbenchmark.org/)Keyword Spotting (KS) detects preregistered keywords by classifying utterances into a predefined set of words. SUPERB uses the widely used Speech Commands dataset v1.0 for the task. The dataset consists of ten classes of keywords, a class for silence, and an unknown class to include false positives. Loading the dataset We will use the [🤗 Datasets](https://github.com/huggingface/datasets) library to download the data and get the Accuracy metric we need to use for evaluation. 
This can be easily done with the functions `load_dataset` and `load_metric`.<jupyter_code>from datasets import load_dataset, load_metric dataset = load_dataset("superb", "ks") metric = load_metric("accuracy")<jupyter_output><empty_output><jupyter_text>The `dataset` object itself is a [`DatasetDict`](https://huggingface.co/docs/datasets/package_reference/main_classes.htmldatasetdict), which contains one key for the training, validation and test set.<jupyter_code>dataset<jupyter_output><empty_output><jupyter_text>To access an actual element, you need to select a split first, then give an index:<jupyter_code>dataset["test"][1000]<jupyter_output><empty_output><jupyter_text>As you can see, the `label` field is not an actual string label. By default the `ClassLabel` fields are encoded into integers for convenience:<jupyter_code>dataset["train"].features["label"]<jupyter_output><empty_output><jupyter_text>Let's create an `id2label` dictionary to decode them back to strings and see what they are. The inverse `label2id` will be useful too, when we load the model later.<jupyter_code>labels = dataset["train"].features["label"].names label2id, id2label = dict(), dict() for i, label in enumerate(labels): label2id[label] = str(i) id2label[str(i)] = label id2label["9"]<jupyter_output><empty_output><jupyter_text>`Wav2Vec2` expects the input in the format of a 1-dimensional array of 16 kHz. This means that the audio file has to be loaded and resampled. Thankfully, `datasets` does this automatically when calling the column `audio`. Let try it out.<jupyter_code>dataset["test"][1000]["audio"]<jupyter_output><empty_output><jupyter_text>We can see that the audio file has automatically been loaded. This is thanks to the new [`"Audio"` feature](https://huggingface.co/docs/datasets/package_reference/main_classes.html?highlight=audiodatasets.Audio) introduced in `datasets == 1.13.3`, which loads and resamples audio files on-the-fly upon calling.The sampling rate is set to 16kHz which is what `Wav2Vec2` expects as an input. To get a sense of what the commands sound like, the following snippet will render some audio examples picked randomly from the dataset. **Note**: *You can run the following cell a couple of times to listen to different audio samples.*<jupyter_code>import random from IPython.display import Audio, display for _ in range(5): rand_idx = random.randint(0, len(dataset["train"])-1) example = dataset["train"][rand_idx] audio = example["audio"] print(f'Label: {id2label[str(example["label"])]}') print(f'Shape: {audio["array"].shape}, sampling rate: {audio["sampling_rate"]}') display(Audio(audio["array"], rate=audio["sampling_rate"])) print()<jupyter_output>Label: go Shape: (16000,), sampling rate: 16000<jupyter_text>If you run the cell a couple of times, you'll see that despite slight variations in length, most of the samples are about 1 second long (`duration = audio_length / sampling_rate`). So we can safely truncate and pad the samples to `16000`. Preprocessing the data Before we can feed those audio clips to our model, we need to preprocess them. 
This is done by a 🤗 Transformers `FeatureExtractor` which will normalize the inputs and put them in a format the model expects, as well as generate the other inputs that the model requires.To do all of this, we instantiate our feature extractor with the `AutoFeatureExtractor.from_pretrained` method, which will ensure that we get a preprocessor that corresponds to the model architecture we want to use.<jupyter_code>from transformers import AutoFeatureExtractor feature_extractor = AutoFeatureExtractor.from_pretrained(model_checkpoint) feature_extractor<jupyter_output><empty_output><jupyter_text>As we've noticed earlier, the samples are roughly 1 second long, so let's set it here:<jupyter_code>max_duration = 1.0 # seconds<jupyter_output><empty_output><jupyter_text>We can then write the function that will preprocess our samples. We just feed them to the `feature_extractor` with the argument `truncation=True`, as well as the maximum sample length. This will ensure that very long inputs like the ones in the `_silence_` class can be safely batched.<jupyter_code>def preprocess_function(examples): audio_arrays = [x["array"] for x in examples["audio"]] inputs = feature_extractor( audio_arrays, sampling_rate=feature_extractor.sampling_rate, max_length=int(feature_extractor.sampling_rate * max_duration), truncation=True, ) return inputs<jupyter_output><empty_output><jupyter_text>The feature extractor will return a list of numpy arays for each example:<jupyter_code>preprocess_function(dataset['train'][:5])<jupyter_output><empty_output><jupyter_text>To apply this function on all utterances in our dataset, we just use the `map` method of our `dataset` object we created earlier. This will apply the function on all the elements of all the splits in `dataset`, so our training, validation and testing data will be preprocessed in one single command.<jupyter_code>encoded_dataset = dataset.map(preprocess_function, remove_columns=["audio", "file"], batched=True) encoded_dataset<jupyter_output><empty_output><jupyter_text>Even better, the results are automatically cached by the 🤗 Datasets library to avoid spending time on this step the next time you run your notebook. The 🤗 Datasets library is normally smart enough to detect when the function you pass to map has changed (and thus requires to not use the cache data). 🤗 Datasets warns you when it uses cached files, you can pass `load_from_cache_file=False` in the call to `map` to not use the cached files and force the preprocessing to be applied again. Training the model Now that our data is ready, we can download the pretrained model and fine-tune it. For classification we use the `AutoModelForAudioClassification` class. Like with the feature extractor, the `from_pretrained` method will download and cache the model for us. As the label ids and the number of labels are dataset dependent, we pass `num_labels`, `label2id`, and `id2label` alongside the `model_checkpoint` here:<jupyter_code>from transformers import AutoModelForAudioClassification, TrainingArguments, Trainer num_labels = len(id2label) model = AutoModelForAudioClassification.from_pretrained( model_checkpoint, num_labels=num_labels, label2id=label2id, id2label=id2label, )<jupyter_output>/usr/local/lib/python3.7/dist-packages/transformers/configuration_utils.py:337: UserWarning: Passing `gradient_checkpointing` to a config initialization is deprecated and will be removed in v5 Transformers. 
Using `model.gradient_checkpointing_enable()` instead, or if you are using the `Trainer` API, pass `gradient_checkpointing=True` in your `TrainingArguments`. "Passing `gradient_checkpointing` to a config initialization is deprecated and will be removed in v5 "<jupyter_text>The warning is telling us we are throwing away some weights (the `quantizer` and `project_q` layers) and randomly initializing some other (the `projector` and `classifier` layers). This is expected in this case, because we are removing the head used to pretrain the model on an unsupervised Vector Quantization objective and replacing it with a new head for which we don't have pretrained weights, so the library warns us we should fine-tune this model before using it for inference, which is exactly what we are going to do. To instantiate a `Trainer`, we will need to define the training configuration and the evaluation metric. The most important is the [`TrainingArguments`](https://huggingface.co/transformers/main_classes/trainer.htmltransformers.TrainingArguments), which is a class that contains all the attributes to customize the training. It requires one folder name, which will be used to save the checkpoints of the model, and all other arguments are optional:<jupyter_code>model_name = model_checkpoint.split("/")[-1] args = TrainingArguments( f"{model_name}-finetuned-ks", evaluation_strategy = "epoch", save_strategy = "epoch", learning_rate=3e-5, per_device_train_batch_size=batch_size, gradient_accumulation_steps=4, per_device_eval_batch_size=batch_size, num_train_epochs=5, warmup_ratio=0.1, logging_steps=10, load_best_model_at_end=True, metric_for_best_model="accuracy", push_to_hub=True, )<jupyter_output><empty_output><jupyter_text>Here we set the evaluation to be done at the end of each epoch, tweak the learning rate, use the `batch_size` defined at the top of the notebook and customize the number of epochs for training, as well as the weight decay. Since the best model might not be the one at the end of training, we ask the `Trainer` to load the best model it saved (according to `metric_name`) at the end of training.The last argument `push_to_hub` allows the Trainer to push the model to the [Hub](https://huggingface.co/models) regularly during training. Remove it if you didn't follow the installation steps at the top of the notebook. If you want to save your model locally with a name that is different from the name of the repository, or if you want to push your model under an organization and not your name space, use the `hub_model_id` argument to set the repo name (it needs to be the full name, including your namespace: for instance `"anton-l/wav2vec2-finetuned-ks"` or `"huggingface/anton-l/wav2vec2-finetuned-ks"`). Next, we need to define a function for how to compute the metrics from the predictions, which will just use the `metric` we loaded earlier. 
The only preprocessing we have to do is to take the argmax of our predicted logits:<jupyter_code>import numpy as np def compute_metrics(eval_pred): """Computes accuracy on a batch of predictions""" predictions = np.argmax(eval_pred.predictions, axis=1) return metric.compute(predictions=predictions, references=eval_pred.label_ids)<jupyter_output><empty_output><jupyter_text>Then we just need to pass all of this along with our datasets to the `Trainer`:<jupyter_code>trainer = Trainer( model, args, train_dataset=encoded_dataset["train"], eval_dataset=encoded_dataset["validation"], tokenizer=feature_extractor, compute_metrics=compute_metrics )<jupyter_output><empty_output><jupyter_text>You might wonder why we pass along the `feature_extractor` as a tokenizer when we already preprocessed our data. This is because we will use it one last time to make all the samples we gather the same length by applying padding, which requires knowing the model's preferences regarding padding (to the left or right? with which token?). The `feature_extractor` has a pad method that will do all of this for us, and the `Trainer` will use it. You can customize this part by defining and passing your own `data_collator`, which will receive the samples like the dictionaries seen above and will need to return a dictionary of tensors (an illustrative sketch is included at the end of this section). Now we can finetune our model by calling the `train` method:<jupyter_code>trainer.train()<jupyter_output>***** Running training ***** Num examples = 51094 Num Epochs = 5 Instantaneous batch size per device = 32 Total train batch size (w. parallel, distributed & accumulation) = 128 Gradient Accumulation steps = 4 Total optimization steps = 1995<jupyter_text>We can check with the `evaluate` method that our `Trainer` did reload the best model properly (if it was not the last one):<jupyter_code>trainer.evaluate()<jupyter_output>***** Running Evaluation ***** Num examples = 6798 Batch size = 32<jupyter_text>You can now upload the result of the training to the Hub; just execute this instruction:<jupyter_code>trainer.push_to_hub()<jupyter_output><empty_output>
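For reference, here is the kind of custom `data_collator` mentioned above, as a minimal illustrative sketch only (the default collation through the `feature_extractor` is what the notebook actually relies on, and the function name is invented here):

```python
import torch

def custom_data_collator(features):
    # Pad the precomputed input_values to the longest sample in the batch
    # and stack the integer labels into a tensor.
    labels = torch.tensor([f["label"] for f in features], dtype=torch.long)
    batch = feature_extractor.pad(
        [{"input_values": f["input_values"]} for f in features],
        padding=True,
        return_tensors="pt",
    )
    batch["labels"] = labels
    return batch

# trainer = Trainer(..., data_collator=custom_data_collator)
```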
notebooks/examples/audio_classification.ipynb/0
{ "file_path": "notebooks/examples/audio_classification.ipynb", "repo_id": "notebooks", "token_count": 4362 }
165
<jupyter_start><jupyter_text>**Fine-tuning for Image Classification with 🤗 Transformers**This notebook shows how to fine-tune any pretrained Vision model for Image Classification on a custom dataset. The idea is to add a randomly initialized classification head on top of a pre-trained encoder, and fine-tune the model altogether on a labeled dataset. ImageFolderThis notebook leverages the [ImageFolder](https://huggingface.co/docs/datasets/v2.0.0/en/image_processimagefolder) feature to easily run the notebook on a custom dataset (namely, [EuroSAT](https://github.com/phelber/EuroSAT) in this tutorial). You can either load a `Dataset` from local folders or from local/remote files, like zip or tar. Any modelThis notebook is built to run on any image classification dataset with any vision model checkpoint from the [Model Hub](https://huggingface.co/) as long as that model has a version with a Image Classification head, such as:* [ViT](https://huggingface.co/docs/transformers/model_doc/vittransformers.ViTForImageClassification)* [Swin Transformer](https://huggingface.co/docs/transformers/model_doc/swintransformers.SwinForImageClassification)* [ConvNeXT](https://huggingface.co/docs/transformers/master/en/model_doc/convnexttransformers.ConvNextForImageClassification)- in short, any model supported by [AutoModelForImageClassification](https://huggingface.co/docs/transformers/model_doc/autotransformers.AutoModelForImageClassification). Data augmentationThis notebook leverages Torchvision's [transforms](https://pytorch.org/vision/stable/transforms.html) for applying data augmentation - note that we do provide alternative notebooks which leverage other libraries, including:* [Albumentations](https://github.com/huggingface/notebooks/blob/main/examples/image_classification_albumentations.ipynb)* [Kornia](https://github.com/huggingface/notebooks/blob/main/examples/image_classification_kornia.ipynb)* [imgaug](https://github.com/huggingface/notebooks/blob/main/examples/image_classification_imgaug.ipynb). ---Depending on the model and the GPU you are using, you might need to adjust the batch size to avoid out-of-memory errors. 
Set those two parameters, then the rest of the notebook should run smoothly.In this notebook, we'll fine-tune from the https://huggingface.co/microsoft/swin-tiny-patch4-window7-224 checkpoint, but note that there are many, many more available on the [hub](https://huggingface.co/models?other=vision).<jupyter_code>model_checkpoint = "microsoft/swin-tiny-patch4-window7-224" # pre-trained model from which to fine-tune batch_size = 32 # batch size for training and evaluation<jupyter_output><empty_output><jupyter_text>Before we start, let's install the `datasets` and `transformers` libraries.<jupyter_code>!pip install -q datasets transformers<jupyter_output> |████████████████████████████████| 325 kB 9.1 MB/s  |████████████████████████████████| 4.0 MB 46.2 MB/s  |████████████████████████████████| 212 kB 54.3 MB/s  |████████████████████████████████| 1.1 MB 48.0 MB/s  |████████████████████████████████| 136 kB 48.7 MB/s  |████████████████████████████████| 77 kB 6.2 MB/s  |████████████████████████████████| 127 kB 51.5 MB/s  |████████████████████████████████| 596 kB 50.7 MB/s  |████████████████████████████████| 6.5 MB 46.3 MB/s  |████████████████████████████████| 895 kB 46.2 MB/s  |████████████████████████████████| 144 kB 57.2 MB/s  |████████████████████████████████| 271 kB 53.1 MB/s  |████████████████████████████████| 94 kB 3.0 MB/s ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts. datascience 0.10.6 requires foli[...]<jupyter_text>If you're opening this notebook locally, make sure your environment has an install from the last version of those libraries.To be able to share your model with the community and generate results like the one shown in the picture below via the inference API, there are a few more steps to follow.First you have to store your authentication token from the Hugging Face website (sign up [here](https://huggingface.co/join) if you haven't already!) then execute the following cell and input your token:<jupyter_code>from huggingface_hub import notebook_login notebook_login()<jupyter_output>Login successful Your token has been saved to /root/.huggingface/token Authenticated through git-credential store but this isn't the helper defined on your machine. You might have to re-authenticate when pushing to the Hugging Face Hub. Run the following command in your terminal in case you want to set this credential helper as the default git config --global credential.helper store<jupyter_text>Then you need to install Git-LFS to upload your model checkpoints:<jupyter_code>%%capture !sudo apt -qq install git-lfs !git config --global credential.helper store<jupyter_output><empty_output><jupyter_text>We also quickly upload some telemetry - this tells us which examples and software versions are getting used so we know where to prioritize our maintenance efforts. 
We don't collect (or care about) any personally identifiable information, but if you'd prefer not to be counted, feel free to skip this step or delete this cell entirely.<jupyter_code>from transformers.utils import send_example_telemetry send_example_telemetry("image_classification_notebook", framework="pytorch")<jupyter_output><empty_output><jupyter_text>Fine-tuning a model on an image classification task In this notebook, we will see how to fine-tune one of the [🤗 Transformers](https://github.com/huggingface/transformers) vision models on an Image Classification dataset.Given an image, the goal is to predict an appropriate class for it, like "tiger". The screenshot below is taken from a [ViT fine-tuned on ImageNet-1k](https://huggingface.co/google/vit-base-patch16-224) - try out the inference widget! Loading the dataset We will use the [🤗 Datasets](https://github.com/huggingface/datasets) library's [ImageFolder](https://huggingface.co/docs/datasets/v2.0.0/en/image_processimagefolder) feature to download our custom dataset into a DatasetDict.In this case, the EuroSAT dataset is hosted remotely, so we provide the `data_files` argument. Alternatively, if you have local folders with images, you can load them using the `data_dir` argument.<jupyter_code>from datasets import load_dataset # load a custom dataset from local/remote files or folders using the ImageFolder feature # option 1: local/remote files (supporting the following formats: tar, gzip, zip, xz, rar, zstd) dataset = load_dataset("imagefolder", data_files="https://madm.dfki.de/files/sentinel/EuroSAT.zip") # note that you can also provide several splits: # dataset = load_dataset("imagefolder", data_files={"train": ["path/to/file1", "path/to/file2"], "test": ["path/to/file3", "path/to/file4"]}) # note that you can push your dataset to the hub very easily (and reload afterwards using load_dataset)! # dataset.push_to_hub("nielsr/eurosat") # dataset.push_to_hub("nielsr/eurosat", private=True) # option 2: local folder # dataset = load_dataset("imagefolder", data_dir="path_to_folder") # option 3: just load any existing dataset from the hub, like CIFAR-10, FashionMNIST ... # dataset = load_dataset("cifar10")<jupyter_output>Using custom data configuration default-0537267e6f812d56<jupyter_text>Let us also load the Accuracy metric, which we'll use to evaluate our model both during and after training.<jupyter_code>from datasets import load_metric metric = load_metric("accuracy")<jupyter_output><empty_output><jupyter_text>The `dataset` object itself is a [`DatasetDict`](https://huggingface.co/docs/datasets/package_reference/main_classes.htmldatasetdict), which contains one key per split (in this case, only "train" for a training split).<jupyter_code>dataset<jupyter_output><empty_output><jupyter_text>To access an actual element, you need to select a split first, then give an index:<jupyter_code>example = dataset["train"][10] example<jupyter_output><empty_output><jupyter_text>Each example consists of an image and a corresponding label. 
We can also verify this by checking the features of the dataset:<jupyter_code>dataset["train"].features<jupyter_output><empty_output><jupyter_text>The cool thing is that we can directly view the image (as the 'image' field is an [Image feature](https://huggingface.co/docs/datasets/package_reference/main_classes.htmldatasets.Image)), as follows:<jupyter_code>example['image']<jupyter_output><empty_output><jupyter_text>Let's make it a little bigger as the images in the EuroSAT dataset are of low resolution (64x64 pixels):<jupyter_code>example['image'].resize((200, 200))<jupyter_output><empty_output><jupyter_text>Let's print the corresponding label:<jupyter_code>example['label']<jupyter_output><empty_output><jupyter_text>As you can see, the `label` field is not an actual string label. By default the `ClassLabel` fields are encoded into integers for convenience:<jupyter_code>dataset["train"].features["label"]<jupyter_output><empty_output><jupyter_text>Let's create an `id2label` dictionary to decode them back to strings and see what they are. The inverse `label2id` will be useful too, when we load the model later.<jupyter_code>labels = dataset["train"].features["label"].names label2id, id2label = dict(), dict() for i, label in enumerate(labels): label2id[label] = i id2label[i] = label id2label[2]<jupyter_output><empty_output><jupyter_text>Preprocessing the data Before we can feed these images to our model, we need to preprocess them. Preprocessing images typically comes down to (1) resizing them to a particular size (2) normalizing the color channels (R,G,B) using a mean and standard deviation. These are referred to as **image transformations**.In addition, one typically performs what is called **data augmentation** during training (like random cropping and flipping) to make the model more robust and achieve higher accuracy. Data augmentation is also a great technique to increase the size of the training data.We will use `torchvision.transforms` for the image transformations/data augmentation in this tutorial, but note that one can use any other package (like [albumentations](https://albumentations.ai/), [imgaug](https://github.com/aleju/imgaug), [Kornia](https://kornia.readthedocs.io/en/latest/) etc.).To make sure we (1) resize to the appropriate size (2) use the appropriate image mean and standard deviation for the model architecture we are going to use, we instantiate what is called an image processor with the `AutoImageProcessor.from_pretrained` method.This image processor is a minimal preprocessor that can be used to prepare images for inference.<jupyter_code>from transformers import AutoImageProcessor image_processor = AutoImageProcessor.from_pretrained(model_checkpoint) image_processor<jupyter_output><empty_output><jupyter_text>The Datasets library is made for processing data very easily. 
We can write custom functions, which can then be applied on an entire dataset (either using [`.map()`](https://huggingface.co/docs/datasets/package_reference/main_classes.html?highlight=mapdatasets.Dataset.map) or [`.set_transform()`](https://huggingface.co/docs/datasets/package_reference/main_classes.html?highlight=set_transformdatasets.Dataset.set_transform)).Here we define 2 separate functions, one for training (which includes data augmentation) and one for validation (which only includes resizing, center cropping and normalizing).<jupyter_code>from torchvision.transforms import ( CenterCrop, Compose, Normalize, RandomHorizontalFlip, RandomResizedCrop, Resize, ToTensor, ) normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std) if "height" in image_processor.size: size = (image_processor.size["height"], image_processor.size["width"]) crop_size = size max_size = None elif "shortest_edge" in image_processor.size: size = image_processor.size["shortest_edge"] crop_size = (size, size) max_size = image_processor.size.get("longest_edge") train_transforms = Compose( [ RandomResizedCrop(crop_size), RandomHorizontalFlip(), ToTensor(), normalize, ] ) val_transforms = Compose( [ Resize(size), CenterCrop(crop_size), ToTensor(), normalize, ] ) def preprocess_train(example_batch): """Apply train_transforms across a batch.""" example_batch["pixel_values"] = [ train_transforms(image.convert("RGB")) for image in example_batch["image"] ] return example_batch def preprocess_val(example_batch): """Apply val_transforms across a batch.""" example_batch["pixel_values"] = [val_transforms(image.convert("RGB")) for image in example_batch["image"]] return example_batch<jupyter_output><empty_output><jupyter_text>Next, we can preprocess our dataset by applying these functions. We will use the `set_transform` functionality, which allows to apply the functions above on-the-fly (meaning that they will only be applied when the images are loaded in RAM).<jupyter_code># split up training into training + validation splits = dataset["train"].train_test_split(test_size=0.1) train_ds = splits['train'] val_ds = splits['test'] train_ds.set_transform(preprocess_train) val_ds.set_transform(preprocess_val)<jupyter_output><empty_output><jupyter_text>Let's access an element to see that we've added a "pixel_values" feature:<jupyter_code>train_ds[0]<jupyter_output><empty_output><jupyter_text>Training the model Now that our data is ready, we can download the pretrained model and fine-tune it. For classification we use the `AutoModelForImageClassification` class. Calling the `from_pretrained` method on it will download and cache the weights for us. As the label ids and the number of labels are dataset dependent, we pass `label2id`, and `id2label` alongside the `model_checkpoint` here. This will make sure a custom classification head will be created (with a custom number of output neurons).NOTE: in case you're planning to fine-tune an already fine-tuned checkpoint, like [facebook/convnext-tiny-224](https://huggingface.co/facebook/convnext-tiny-224) (which has already been fine-tuned on ImageNet-1k), then you need to provide the additional argument `ignore_mismatched_sizes=True` to the `from_pretrained` method. This will make sure the output head (with 1000 output neurons) is thrown away and replaced by a new, randomly initialized classification head that includes a custom number of output neurons. 
You don't need to specify this argument in case the pre-trained model doesn't include a head.<jupyter_code>from transformers import AutoModelForImageClassification, TrainingArguments, Trainer model = AutoModelForImageClassification.from_pretrained( model_checkpoint, label2id=label2id, id2label=id2label, ignore_mismatched_sizes = True, # provide this in case you're planning to fine-tune an already fine-tuned checkpoint )<jupyter_output><empty_output><jupyter_text>The warning is telling us we are throwing away some weights (the weights and bias of the `classifier` layer) and randomly initializing some other (the weights and bias of a new `classifier` layer). This is expected in this case, because we are adding a new head for which we don't have pretrained weights, so the library warns us we should fine-tune this model before using it for inference, which is exactly what we are going to do. To instantiate a `Trainer`, we will need to define the training configuration and the evaluation metric. The most important is the [`TrainingArguments`](https://huggingface.co/transformers/main_classes/trainer.htmltransformers.TrainingArguments), which is a class that contains all the attributes to customize the training. It requires one folder name, which will be used to save the checkpoints of the model.Most of the training arguments are pretty self-explanatory, but one that is quite important here is `remove_unused_columns=False`. This one will drop any features not used by the model's call function. By default it's `True` because usually it's ideal to drop unused feature columns, making it easier to unpack inputs into the model's call function. But, in our case, we need the unused features ('image' in particular) in order to create 'pixel_values'.<jupyter_code>model_name = model_checkpoint.split("/")[-1] args = TrainingArguments( f"{model_name}-finetuned-eurosat", remove_unused_columns=False, evaluation_strategy = "epoch", save_strategy = "epoch", learning_rate=5e-5, per_device_train_batch_size=batch_size, gradient_accumulation_steps=4, per_device_eval_batch_size=batch_size, num_train_epochs=3, warmup_ratio=0.1, logging_steps=10, load_best_model_at_end=True, metric_for_best_model="accuracy", push_to_hub=True, )<jupyter_output><empty_output><jupyter_text>Here we set the evaluation to be done at the end of each epoch, tweak the learning rate, use the `batch_size` defined at the top of the notebook and customize the number of epochs for training, as well as the weight decay. Since the best model might not be the one at the end of training, we ask the `Trainer` to load the best model it saved (according to `metric_name`) at the end of training.The last argument `push_to_hub` allows the Trainer to push the model to the [Hub](https://huggingface.co/models) regularly during training. Remove it if you didn't follow the installation steps at the top of the notebook. If you want to save your model locally with a name that is different from the name of the repository, or if you want to push your model under an organization and not your name space, use the `hub_model_id` argument to set the repo name (it needs to be the full name, including your namespace: for instance `"nielsr/vit-finetuned-cifar10"` or `"huggingface/nielsr/vit-finetuned-cifar10"`). Next, we need to define a function for how to compute the metrics from the predictions, which will just use the `metric` we loaded earlier. 
The only preprocessing we have to do is to take the argmax of our predicted logits:<jupyter_code>import numpy as np # the compute_metrics function takes a Named Tuple as input: # predictions, which are the logits of the model as Numpy arrays, # and label_ids, which are the ground-truth labels as Numpy arrays. def compute_metrics(eval_pred): """Computes accuracy on a batch of predictions""" predictions = np.argmax(eval_pred.predictions, axis=1) return metric.compute(predictions=predictions, references=eval_pred.label_ids)<jupyter_output><empty_output><jupyter_text>We also define a `collate_fn`, which will be used to batch examples together.Each batch consists of 2 keys, namely `pixel_values` and `labels`.<jupyter_code>import torch def collate_fn(examples): pixel_values = torch.stack([example["pixel_values"] for example in examples]) labels = torch.tensor([example["label"] for example in examples]) return {"pixel_values": pixel_values, "labels": labels}<jupyter_output><empty_output><jupyter_text>Then we just need to pass all of this along with our datasets to the `Trainer`:<jupyter_code>trainer = Trainer( model, args, train_dataset=train_ds, eval_dataset=val_ds, tokenizer=image_processor, compute_metrics=compute_metrics, data_collator=collate_fn, )<jupyter_output>Cloning https://huggingface.co/nielsr/swin-tiny-patch4-window7-224-finetuned-eurosat into local empty directory.<jupyter_text>You might wonder why we pass along the `image_processor` as a tokenizer when we already preprocessed our data. This is only to make sure the image processor configuration file (stored as JSON) will also be uploaded to the repo on the hub. Now we can finetune our model by calling the `train` method:<jupyter_code>train_results = trainer.train() # rest is optional but nice to have trainer.save_model() trainer.log_metrics("train", train_results.metrics) trainer.save_metrics("train", train_results.metrics) trainer.save_state()<jupyter_output>/usr/local/lib/python3.7/dist-packages/transformers/optimization.py:309: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning FutureWarning, ***** Running training ***** Num examples = 24300 Num Epochs = 3 Instantaneous batch size per device = 32 Total train batch size (w. 
parallel, distributed & accumulation) = 128 Gradient Accumulation steps = 4 Total optimization steps = 570<jupyter_text>We can check with the `evaluate` method that our `Trainer` did reload the best model properly (if it was not the last one):<jupyter_code>metrics = trainer.evaluate() # some nice to haves: trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics)<jupyter_output>***** Running Evaluation ***** Num examples = 2700 Batch size = 32<jupyter_text>You can now upload the result of the training to the Hub, just execute this instruction (note that the Trainer will automatically create a model card as well as Tensorboard logs - see the "Training metrics" tab - amazing isn't it?):<jupyter_code>trainer.push_to_hub()<jupyter_output>Saving model checkpoint to swin-tiny-patch4-window7-224-finetuned-eurosat Configuration saved in swin-tiny-patch4-window7-224-finetuned-eurosat/config.json Model weights saved in swin-tiny-patch4-window7-224-finetuned-eurosat/pytorch_model.bin Feature extractor saved in swin-tiny-patch4-window7-224-finetuned-eurosat/preprocessor_config.json<jupyter_text>You can now share this model with all your friends, family, favorite pets: they can all load it with the identifier `"your-username/the-name-you-picked"` so for instance:```pythonfrom transformers import AutoModelForImageClassification, AutoImageProcessorimage_processor = AutoImageProcessor.from_pretrained("nielsr/my-awesome-model")model = AutoModelForImageClassification.from_pretrained("nielsr/my-awesome-model")``` InferenceLet's say you have a new image, on which you'd like to make a prediction. Let's load a satellite image of a forest (that's not part of the EuroSAT dataset), and see how the model does.<jupyter_code>from PIL import Image import requests url = 'https://huggingface.co/nielsr/convnext-tiny-finetuned-eurostat/resolve/main/forest.png' image = Image.open(requests.get(url, stream=True).raw) image<jupyter_output><empty_output><jupyter_text>We'll load the image processor and model from the hub (here, we use the [Auto Classes](https://huggingface.co/docs/transformers/model_doc/autotransformers.AutoModelForImageClassification), which will make sure the appropriate classes will be loaded automatically based on the `config.json` and `preprocessor_config.json` files of the repo on the hub):<jupyter_code>from transformers import AutoModelForImageClassification, AutoImageProcessor repo_name = "nielsr/swin-tiny-patch4-window7-224-finetuned-eurosat" image_processor = AutoImageProcessor.from_pretrained(repo_name) model = AutoModelForImageClassification.from_pretrained(repo_name) # prepare image for the model encoding = image_processor(image.convert("RGB"), return_tensors="pt") print(encoding.pixel_values.shape) import torch # forward pass with torch.no_grad(): outputs = model(**encoding) logits = outputs.logits predicted_class_idx = logits.argmax(-1).item() print("Predicted class:", model.config.id2label[predicted_class_idx])<jupyter_output>Predicted class: Forest<jupyter_text>Looks like our model got it correct! Pipeline APIAn alternative way to quickly perform inference with any model on the hub is by leveraging the [Pipeline API](https://huggingface.co/docs/transformers/main_classes/pipelines), which abstracts away all the steps we did manually above for us. It will perform the preprocessing, forward pass and postprocessing all in a single object. 
Let's showcase this for our trained model:<jupyter_code>from transformers import pipeline pipe = pipeline("image-classification", "nielsr/swin-tiny-patch4-window7-224-finetuned-eurosat") pipe(image)<jupyter_output><empty_output><jupyter_text>As we can see, it not only shows the class label with the highest probability, but also returns the top 5 labels, with their corresponding scores. Note that the pipelines also work with local models and image processors:<jupyter_code>pipe = pipeline("image-classification", model=model, feature_extractor=image_processor) pipe(image)<jupyter_output><empty_output>
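As a small addition that is not part of the original notebook, here is a hedged sketch of two conveniences the pipeline offers: limiting the number of returned labels with `top_k`, and classifying several images in one call by passing a list. The second file path is a hypothetical local image used purely for illustration.

```python
from transformers import pipeline

pipe = pipeline("image-classification", model=model, feature_extractor=image_processor)

# keep only the two most likely labels instead of the default top 5
print(pipe(image, top_k=2))

# passing a list of images returns one list of predictions per image
# ("path/to/another_satellite_image.png" is a placeholder path)
batch_predictions = pipe([image, "path/to/another_satellite_image.png"])
print(batch_predictions)
```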
notebooks/examples/image_classification.ipynb/0
{ "file_path": "notebooks/examples/image_classification.ipynb", "repo_id": "notebooks", "token_count": 7435 }
166
<jupyter_start><jupyter_text>Pre-Training a 🤗 Transformers model on TPU with **Flax/JAX** In this notebook, we will see how to pretrain one of the [🤗 Transformers](https://github.com/huggingface/transformers) models on TPU using [**Flax**](https://flax.readthedocs.io/en/latest/index.html). The popular masked language modeling (MLM) objective, *cf.* [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805), will be used as the pre-training objective. As can be seen on [this benchmark](https://github.com/huggingface/transformers/tree/master/examples/flax/language-modeling#runtime-evaluation) using Flax/JAX on GPU/TPU is often much faster and can also be considerably cheaper than using PyTorch on GPU/TPU. [**Flax**](https://flax.readthedocs.io/en/latest/index.html) is a high-performance neural network library designed for flexibility, built on top of JAX (see below). It aims to provide users with full control of their training code and is carefully designed to work well with JAX transformations such as `grad` and `pmap` (see the [Flax philosophy](https://flax.readthedocs.io/en/latest/philosophy.html)). For an introduction to Flax see the [Flax Basic Colab](https://flax.readthedocs.io/en/latest/notebooks/flax_basics.html) or the list of curated [Flax examples](https://flax.readthedocs.io/en/latest/examples.html). [**JAX**](https://jax.readthedocs.io/en/latest/index.html) is Autograd and XLA, brought together for high-performance numerical computing and machine learning research. It provides composable transformations of Python+NumPy programs: differentiate, vectorize, parallelize, Just-In-Time compile to GPU/TPU, and more. A great place for getting started with JAX is the [JAX 101 Tutorial](https://jax.readthedocs.io/en/latest/jax-101/index.html). If you're opening this Notebook on colab, you will probably need to install 🤗 Transformers, 🤗 Datasets, 🤗 Tokenizers as well as [Flax](https://github.com/google/flax.git) and [Optax](https://github.com/deepmind/optax). Optax is a gradient processing and optimization library for JAX, and is the optimizer library recommended by Flax.<jupyter_code>%%capture !pip install datasets !pip install git+https://github.com/huggingface/transformers.git !pip install tokenizers !pip install flax !pip install git+https://github.com/deepmind/optax.git<jupyter_output><empty_output><jupyter_text>You will also need to set up the TPU for JAX in this notebook. This can be done by executing the following lines.<jupyter_code>import jax.tools.colab_tpu jax.tools.colab_tpu.setup_tpu()<jupyter_output><empty_output><jupyter_text>If everything is set up correctly, the following command should return a list of 8 TPU devices.<jupyter_code>jax.local_devices()<jupyter_output><empty_output><jupyter_text>In this notebook, we will pre-train an [autoencoding model](https://huggingface.co/transformers/model_summary.html#autoencoding-models) on one of the languages of the OSCAR corpus. [OSCAR](https://oscar-corpus.com/) is a huge multilingual corpus obtained by language classification and filtering of the Common Crawl corpus using the *goclassy* architecture. Let's first select the language that our model should learn. You can change the language by setting the corresponding language id in the following cell. The language ids can be found under the "*deduplicated*" column on the official [OSCAR](https://oscar-corpus.com/) website. Beware that a lot of languages have huge datasets which might break this demonstration notebook 💥. 
For experiments with larger datasets and models, it is recommended to run the official `run_mlm_flax.py` script offline that can be found [here](https://github.com/huggingface/transformers/tree/master/examples/flax/language-modelingmasked-language-modeling).Here we select `is` for Icelandic 🇮🇸.<jupyter_code>language = "is"<jupyter_output><empty_output><jupyter_text>Next, we select the model architecture to be trained from scratch.Here we choose [**`roberta-base`**](https://huggingface.co/roberta-base), but essentially any auto-encoding model that is available on the [**🤗 hub**](https://huggingface.co/models?filter=masked-lm,jax) in JAX/Flax can be used.<jupyter_code>model_config = "roberta-base"<jupyter_output><empty_output><jupyter_text>We also quickly upload some telemetry - this tells us which examples and software versions are getting used so we know where to prioritize our maintenance efforts. We don't collect (or care about) any personally identifiable information, but if you'd prefer not to be counted, feel free to skip this step or delete this cell entirely.<jupyter_code>from transformers.utils import send_example_telemetry send_example_telemetry("masked_language_modeling_notebook", framework="flax")<jupyter_output><empty_output><jupyter_text>1. Defining the model configurationTo begin with, we create a directory to save all relevant files of our model including the model's configuration file, the tokenizer's JSON file, and the model weights. We call the directory `"roberta-base-pretrained-is"`:<jupyter_code>model_dir = model_config + f"-pretrained-{language}"<jupyter_output><empty_output><jupyter_text>and create it:<jupyter_code>from pathlib import Path Path(model_dir).mkdir(parents=True, exist_ok=True)<jupyter_output><empty_output><jupyter_text>Next, we'll download the model configuration:<jupyter_code>from transformers import AutoConfig config = AutoConfig.from_pretrained(model_config)<jupyter_output><empty_output><jupyter_text>and save it to the directory:<jupyter_code>config.save_pretrained(f"{model_dir}")<jupyter_output><empty_output><jupyter_text>2. Training a tokenizer from scratchOne has to pre-process the raw text data to a format that is understandable by the model. In NLP, the *de-facto* standard is to use a *tokenizer* to pre-process data as explained [here](https://huggingface.co/transformers/preprocessing.html). We can leverage the blazing-fast 🤗 Tokenizer library to train a [**ByteLevelBPETokenizer**](https://medium.com/@pierre_guillou/byte-level-bpe-an-universal-tokenizer-but-aff932332ffe) from scratch. Let's import the necessary building blocks from `tokenizers` and the `load_dataset` function.<jupyter_code>from datasets import load_dataset from tokenizers import ByteLevelBPETokenizer from pathlib import Path<jupyter_output><empty_output><jupyter_text>We will store our tokenizer files and model files in a directory, called `model_dir`. 
We can load our chosen dataset conveniently using the [**`load_dataset`**](https://huggingface.co/docs/datasets/package_reference/loading_methods.html?highlight=load_datasetdatasets.load_dataset) function.<jupyter_code>raw_dataset = load_dataset("oscar", f"unshuffled_deduplicated_{language}")<jupyter_output><empty_output><jupyter_text>Having imported the `ByteLevelBPETokenizer`, we instantiate it,<jupyter_code>tokenizer = ByteLevelBPETokenizer()<jupyter_output><empty_output><jupyter_text>define a training iterator,<jupyter_code>def batch_iterator(batch_size=1000): for i in range(0, len(raw_dataset), batch_size): yield raw_dataset["train"][i: i + batch_size]["text"]<jupyter_output><empty_output><jupyter_text>and train the tokenizer by defining `vocab_size` according to our model's configuration along with the `min_frequency` as well as some `special_tokens`:<jupyter_code>tokenizer.train_from_iterator(batch_iterator(), vocab_size=config.vocab_size, min_frequency=2, special_tokens=[ "<s>", "<pad>", "</s>", "<unk>", "<mask>", ])<jupyter_output><empty_output><jupyter_text>Finally, we save the trained tokenizer in the model folder.<jupyter_code>tokenizer.save(f"{model_dir}/tokenizer.json")<jupyter_output><empty_output><jupyter_text>For more information on training tokenizers, see [this](https://huggingface.co/docs/tokenizers/python/latest/tutorials/python/training_from_memory.html) document. 3. Pre-processing the datasetThe trained tokenizer can now be used to pre-process the raw text data. Most auto-encoding models, such as [*BERT*](https://arxiv.org/abs/1810.04805) and [*RoBERTa*](https://arxiv.org/abs/1907.11692), are trained to handle sequences up to `512` tokens. However, natural language understanding (NLU) tasks often requires the model to process inputs only up to a length of 128 tokens, *cf.* [How to Train BERT with an Academic Budget](https://arxiv.org/abs/2104.07705).Since the required memory of Transformer models scales quadratically with the sequence length, we cap the maximum input length at 128 here. The raw text data is pre-processed accordingly.<jupyter_code>max_seq_length = 128<jupyter_output><empty_output><jupyter_text>To cross-validate the model's performance during pre-training, we hold out 5% of the data as the validation set.Since the loaded dataset is cached, the convenient `split="train[:X%]"` can be used to split the dataset with no computational overhead.The first 95% percent will be used as the training data:<jupyter_code>raw_dataset["train"] = load_dataset("oscar", f"unshuffled_deduplicated_{language}", split="train[5%:]")<jupyter_output>Reusing dataset oscar (/root/.cache/huggingface/datasets/oscar/unshuffled_deduplicated_is/1.0.0/e4f06cecc7ae02f7adf85640b4019bf476d44453f251a1d84aebae28b0f8d51d)<jupyter_text>and the final 5% as the validation data.<jupyter_code>raw_dataset["validation"] = load_dataset("oscar", f"unshuffled_deduplicated_{language}", split="train[:5%]")<jupyter_output>Reusing dataset oscar (/root/.cache/huggingface/datasets/oscar/unshuffled_deduplicated_is/1.0.0/e4f06cecc7ae02f7adf85640b4019bf476d44453f251a1d84aebae28b0f8d51d)<jupyter_text>For demonstration purposes, we will use only the first 10000 samples of the training data and the first 1000 samples of the validation data to not have to wait too much for each cell to be executed. If you want to run the colab on the **full** dataset, please comment the following cell. Using the full dataset, the notebook will run for *ca.* 12 hours until loss convergence and give a final accuracy of around *50%*. 
Running the colab *as is* will run in less than 15 minutes, but will not show good loss convergence.<jupyter_code># these cells should be commented out to run on full dataset raw_dataset["train"] = raw_dataset["train"].select(range(10000)) raw_dataset["validation"] = raw_dataset["validation"].select(range(1000))<jupyter_output><empty_output><jupyter_text>Next, we load the previously trained `ByteLevelBPETokenizer` tokenizer to pre-process the raw text data:<jupyter_code>from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(f"{model_dir}")<jupyter_output><empty_output><jupyter_text>We can then write the function that will preprocess the raw text data. We just feed the text samples - stored in the `"text"` column - to the tokenizer and make sure the mask for special tokens is created:<jupyter_code>def tokenize_function(examples): return tokenizer(examples["text"], return_special_tokens_mask=True)<jupyter_output><empty_output><jupyter_text>and apply the tokenization function to every text sample via the convenient `map(...)` function of Datasets. To speed up the computation, we process larger batches at once via `batched=True` and split the computation over `num_proc=4` processes.**Note**: Running this command on the whole dataset might take up to 10 minutes ☕.<jupyter_code>tokenized_datasets = raw_dataset.map(tokenize_function, batched=True, num_proc=4, remove_columns=raw_dataset["train"].column_names)<jupyter_output><jupyter_text>Following [RoBERTa: A Robustly Optimized BERT Pretraining Approach]( https://arxiv.org/abs/1907.11692), our model is pre-trained just with a masked language modeling (MLM) objective which is independent of whether the input sequence ends with a finished or unfinished sentence. The model can process the training data most efficiently if all data samples are of the same length. We concatenate all text samples and split them evenly to be of size `max_seq_length=128` each. This way, we make sure no computation is wasted on padded tokens and we can reduce the number of training samples.Let's define such a function to group the dataset into equally sized data samples:<jupyter_code>def group_texts(examples): concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()} total_length = len(concatenated_examples[list(examples.keys())[0]]) total_length = (total_length // max_seq_length) * max_seq_length result = { k: [t[i : i + max_seq_length] for i in range(0, total_length, max_seq_length)] for k, t in concatenated_examples.items() } return result<jupyter_output><empty_output><jupyter_text>We pass `group_texts` to the `map(...)` function and set `batched=True` to make sure that the function is applied to a large batch of data samples. **Note**: Running this function on the whole dataset might take up to 50 minutes 🕒.<jupyter_code>tokenized_datasets = tokenized_datasets.map(group_texts, batched=True, num_proc=4)<jupyter_output><jupyter_text>Awesome, the data is now fully pre-processed and ready to be used for training 😎. 4. Pre-Training the modelNow we will see how to power of Google's tensor processing unit (TPU) can be leveraged with Flax/JAX for the compute-intensive pre-training of language models.We need to import `jax`, `flax`, `optax`, `numpy` to define our training loop. 
Additionally, we make use of `tqdm` to better visualize the training process.<jupyter_code>import jax import optax import flax import jax.numpy as jnp from flax.training import train_state from flax.training.common_utils import get_metrics, onehot, shard import numpy as np from tqdm.notebook import tqdm<jupyter_output><empty_output><jupyter_text>First, we define all relevant hyper-parameters for pretraining in this notebook:- Each TPU will process a batch size of `64`- The model is trained for `10` epochs- The learning rate starts at `5e-5` and is linearly decayed with each training step- To reproduce the training run, a random seed is set to `0`. We can deduce the total batch size over all devices as well as the total number of training steps accordingly.<jupyter_code>per_device_batch_size = 64 num_epochs = 10 training_seed = 0 learning_rate = 5e-5 total_batch_size = per_device_batch_size * jax.device_count() num_train_steps = len(tokenized_datasets["train"]) // total_batch_size * num_epochs<jupyter_output><empty_output><jupyter_text>It has been shown that for MLM pretraining it is more efficient to use much larger batch sizes, though this requires many GPUs or TPUs.- [How to Train BERT with an Academic Budget](https://arxiv.org/abs/2104.07705) shows that pretraining BERT is cheaper when trained on larger batch sizes up to 16384.- [Large Batch Optimization for Deep Learning: Training BERT in 76 minutes](https://arxiv.org/abs/1904.00962) shows how to pretrain a BERT in a bit more than an hour in a highly distributed setting using a batch size of 32768. We use a batch size of `8 * 64 = 256` here due to the TPU memory constraints of this notebook. When running this script locally on a TPUv3-8, one can easily use batch sizes of up to `8 * 256 = 2048`. Now we randomly initialize a `roberta-base` model according to its configuration. To save memory and improve speed, we initialize the weights directly in `bfloat16` by setting `dtype=jnp.dtype("bfloat16")`.<jupyter_code>from transformers import FlaxAutoModelForMaskedLM model = FlaxAutoModelForMaskedLM.from_config(config, seed=training_seed, dtype=jnp.dtype("bfloat16"))<jupyter_output><empty_output><jupyter_text>Next, we define the learning rate schedule. A simple and effective learning rate schedule is the linear decay with warmup (click [here](https://huggingface.co/transformers/main_classes/optimizer_schedules.html#transformers.get_linear_schedule_with_warmup) for more information). For simplicity, we set the number of warmup steps to 0 here. The schedule is then fully defined by the number of training steps and the learning rate. It is recommended to use the [**optax**](https://github.com/deepmind/optax) library for training utilities, *e.g.* learning rate schedules and optimizers. To see how to define a learning rate schedule with warmup, please take a look at the [official Flax MLM pre-training script](https://github.com/huggingface/transformers/blob/80d712fac6ccae308a2f408ebbc0c4d8c482d509/examples/flax/language-modeling/run_mlm_flax.py#L514).<jupyter_code>linear_decay_lr_schedule_fn = optax.linear_schedule(init_value=learning_rate, end_value=0, transition_steps=num_train_steps)<jupyter_output><empty_output><jupyter_text>We will be using the standard Adam optimizer with weight decay, called AdamW (Adam + weight decay). 
AdamW can easily be imported from [optax](https://github.com/deepmind/optax) and is created from the just defined learning rate schedule as well as a couple of other hyper-parameters (*beta1*, *beta2*, *epsilon*) that are hard-coded in this notebook.For more information on AdamW (Adam + weight decay), one can take a look at [this](https://www.fast.ai/2018/07/02/adam-weight-decay/) blog post.<jupyter_code>adamw = optax.adamw(learning_rate=linear_decay_lr_schedule_fn, b1=0.9, b2=0.98, eps=1e-8, weight_decay=0.01)<jupyter_output><empty_output><jupyter_text>Next, we will create the *training state* that includes the optimizer, the loss function, and is responsible for updating the model's parameters during training.Most JAX transformations (notably [jax.jit](https://jax.readthedocs.io/en/latest/jax-101/02-jitting.html)) require functions that are transformed to have no side effects. This is because any such side-effects will only be executed once when the Python version of the function is run during compilation (see [Stateful Computations in JAX](https://jax.readthedocs.io/en/latest/jax-101/07-state.html)). As a consequence, Flax models (which can be transformed by JAX transformations) are **immutable**, and the state of the model (i.e., its weight parameters) is stored *outside* of the model instance.Models are initialized and updated in a purely functional way: you pass the state to the model when calling it, and the model returns the new (possibly modified) state, leaving the model instance itself unchanged.Flax provides a convenience class [`flax.training.train_state.TrainState`](https://github.com/google/flax/blob/9da95cdd12591f42d2cd4c17089861bff7e43cc5/flax/training/train_state.pyL22), which stores things such as the model parameters, the loss function, the optimizer, and exposes an `apply_gradients` function to update the model's weight parameters.Alright, let's begin by defining our *training state* class. We create a `TrainState` class that stores the model's forward pass as the `apply_fn`, the `params`, and the AdamW optimizer.<jupyter_code>state = train_state.TrainState.create(apply_fn=model.__call__, params=model.params, tx=adamw)<jupyter_output><empty_output><jupyter_text>For masked language model (MLM) pretraining, some of the input tokens are randomly masked, and the objective is to predict the original vocabulary id of the masked word based only on its context. More precisely, for [BERT-like MLM pretraining](https://arxiv.org/abs/1810.04805) **15%** of all input tokens are replaced by a mask token with **80%** probability, by another random token with **10%** probability, and stay the same with **10%** probability. Let's implement a data collator that given a training batch randomly mask some input tokens according to the BERT-like MLM pretraining above. Note that the **85%** of tokens, that are not replaced for MLM pretraining, would be trivial for the model to predict since it even has access to the token itself. 
To make sure the model learns to predict masked tokens instead of simply copying input tokens to output tokens, we indicate that no loss should be computed on the **85%** of non-replaced tokens by setting their label to `-100`.<jupyter_code>@flax.struct.dataclass class FlaxDataCollatorForMaskedLanguageModeling: mlm_probability: float = 0.15 def __call__(self, examples, tokenizer, pad_to_multiple_of=16): batch = tokenizer.pad(examples, return_tensors="np", pad_to_multiple_of=pad_to_multiple_of) special_tokens_mask = batch.pop("special_tokens_mask", None) batch["input_ids"], batch["labels"] = self.mask_tokens( batch["input_ids"], special_tokens_mask, tokenizer ) return batch def mask_tokens(self, inputs, special_tokens_mask, tokenizer): labels = inputs.copy() # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`) probability_matrix = np.full(labels.shape, self.mlm_probability) special_tokens_mask = special_tokens_mask.astype("bool") probability_matrix[special_tokens_mask] = 0.0 masked_indices = np.random.binomial(1, probability_matrix).astype("bool") labels[~masked_indices] = -100 # We only compute loss on masked tokens # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]) indices_replaced = np.random.binomial(1, np.full(labels.shape, 0.8)).astype("bool") & masked_indices inputs[indices_replaced] = tokenizer.convert_tokens_to_ids(tokenizer.mask_token) # 10% of the time, we replace masked input tokens with a random word indices_random = np.random.binomial(1, np.full(labels.shape, 0.5)).astype("bool") indices_random &= masked_indices & ~indices_replaced random_words = np.random.randint(tokenizer.vocab_size, size=labels.shape, dtype="i4") inputs[indices_random] = random_words[indices_random] # The rest of the time (10% of the time) we keep the masked input tokens unchanged return inputs, labels<jupyter_output><empty_output><jupyter_text>Having defined the MLM data collator, we can now instantiate one.<jupyter_code>data_collator = FlaxDataCollatorForMaskedLanguageModeling(mlm_probability=0.15)<jupyter_output><empty_output><jupyter_text>At each training epoch, the dataset should be shuffled and superfluous samples that make the dataset not evenly divisible by the batch size are thrown away. Instead of passing the dataset, we prepare the indices of data samples to be used for each training epoch. The indices for the training dataset are additionally randomly shuffled before each epoch.<jupyter_code>def generate_batch_splits(num_samples, batch_size, rng=None): samples_idx = jax.numpy.arange(num_samples) # if random seed is provided, then shuffle the dataset if rng is not None: samples_idx = jax.random.permutation(rng, samples_idx) samples_to_remove = num_samples % batch_size # throw away incomplete batch if samples_to_remove != 0: samples_idx = samples_idx[:-samples_to_remove] batch_idx = np.split(samples_idx, num_samples // batch_size) return batch_idx<jupyter_output><empty_output><jupyter_text>During training, we want to update the model parameters and evaluate the performance after each epoch. Let's write the functions `train_step` and `eval_step` accordingly. During training the weight parameters should be updated as follows:1. Define a loss function `loss_function` that first runs a forward pass of the model given data input. Remember that Flax models are immutable, and we explicitly pass it the state (in this case the model parameters and the RNG). 
`loss_function` returns a scalar loss (using the previously defined `state.loss_function`) between the model output and input targets.2. Differentiate this loss function using [`jax.value_and_grad`](https://jax.readthedocs.io/en/latest/notebooks/autodiff_cookbook.htmlevaluate-a-function-and-its-gradient-using-value-and-grad). This is a JAX transformation called [automatic differentiation](https://en.wikipedia.org/wiki/Automatic_differentiation), which computes the gradient of `loss_function` given the input to the function (i.e., the parameters of the model), and returns the value and the gradient in a pair `(loss, gradients)`.3. Compute the mean gradient over all devices using the collective operation [lax.pmean](https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.pmean.html). As we will see below, each device runs `train_step` on a different batch of data, but by taking the mean here we ensure the model parameters are the same on all devices.4. Use `state.apply_gradients`, which applies the gradients to the weights.Below, you can see how each of the described steps above is put into practice.<jupyter_code>def train_step(state, batch, dropout_rng): dropout_rng, new_dropout_rng = jax.random.split(dropout_rng) def loss_fn(params): labels = batch.pop("labels") logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0] # compute loss, ignore padded input tokens label_mask = jax.numpy.where(labels > 0, 1.0, 0.0) loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])) * label_mask # take average loss = loss.sum() / label_mask.sum() return loss grad_fn = jax.value_and_grad(loss_fn) loss, grad = grad_fn(state.params) grad = jax.lax.pmean(grad, "batch") new_state = state.apply_gradients(grads=grad) metrics = jax.lax.pmean( {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)}, axis_name="batch" ) return new_state, metrics, new_dropout_rng<jupyter_output><empty_output><jupyter_text>Now, we want to do parallelized training over all TPU devices. To do so, we use [`jax.pmap`](https://jax.readthedocs.io/en/latest/jax.html?highlight=pmapparallelization-pmap). This will compile the function once and run the same program on each device (it is an [SPMD program](https://en.wikipedia.org/wiki/SPMD)). When calling this pmapped function, all inputs (`"state"`, `"batch"`, `"dropout_rng"`) should be replicated for all devices, which means that the first axis of each argument is used to map over all TPU devices.<jupyter_code>parallel_train_step = jax.pmap(train_step, "batch")<jupyter_output><empty_output><jupyter_text>Similarly, we can now define the evaluation step. Here, the function is much easier as we don't need to compute any gradients. 
To better monitor the performance improvement during training, the accuracy is computed alongside the loss and stored in a `metric` dictionary during evaluation.<jupyter_code>def eval_step(params, batch): labels = batch.pop("labels") logits = model(**batch, params=params, train=False)[0] label_mask = jax.numpy.where(labels > 0, 1.0, 0.0) loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])) * label_mask # compute accuracy accuracy = jax.numpy.equal(jax.numpy.argmax(logits, axis=-1), labels) * label_mask # summarize metrics metrics = {"loss": loss.sum(), "accuracy": accuracy.sum(), "normalizer": label_mask.sum()} metrics = jax.lax.psum(metrics, axis_name="batch") return metrics<jupyter_output><empty_output><jupyter_text>Similarly, we also apply `jax.pmap` to the evaluation step.<jupyter_code>parallel_eval_step = jax.pmap(eval_step, "batch")<jupyter_output><empty_output><jupyter_text>Next, we replicate/copy the weight parameters on each device, so that we can pass them to our parallelized mapped functions.<jupyter_code>state = flax.jax_utils.replicate(state)<jupyter_output>/usr/local/lib/python3.7/dist-packages/jax/lib/xla_bridge.py:317: UserWarning: jax.host_count has been renamed to jax.process_count. This alias will eventually be removed; please update your code. "jax.host_count has been renamed to jax.process_count. This alias " /usr/local/lib/python3.7/dist-packages/jax/lib/xla_bridge.py:304: UserWarning: jax.host_id has been renamed to jax.process_index. This alias will eventually be removed; please update your code. "jax.host_id has been renamed to jax.process_index. This alias "<jupyter_text>To monitor the performance during training, we accumulate the loss and the accuracy of each evaluation step. Because the loss is not computed on most input tokens, we need to normalize the accuracy and loss before computing the average. Let's wrap this logit into a `process_eval_metrics` function to not clutter the training loop too much.<jupyter_code>def process_eval_metrics(metrics): metrics = get_metrics(metrics) metrics = jax.tree_map(jax.numpy.sum, metrics) normalizer = metrics.pop("normalizer") metrics = jax.tree_map(lambda x: x / normalizer, metrics) return metrics<jupyter_output><empty_output><jupyter_text>We can almost start training! In a final preparation step, we generate a seeded [**PRNGKey**](https://jax.readthedocs.io/en/latest/_autosummary/jax.random.PRNGKey.htmljax-random-prngkey) used as the random seed for dropout layers and dataset shuffling.Similar to how we had to copy/replicate the state on all 8 TPU devices, we also need to generate one `PRNGKey` per device, which is why we split the initial `rng` key into 8 random seeds.<jupyter_code>rng = jax.random.PRNGKey(training_seed) dropout_rngs = jax.random.split(rng, jax.local_device_count())<jupyter_output><empty_output><jupyter_text>Now, we are all set to finally start training! Let's put all the pieces together and write the training loop. We start each epoch by generating a new random seed that will be used for dataset shuffling, the dropout layers and the input token masking. Next, we generate the training dataset indices.In the first nested loop - the training loop - we shard the input batch on all 8 TPU devices, and run the training step. Analogs, in the second nested loop - the evaluation loop - the evaluation batches are sharded and the evaluation step is run.**Note**: It might seem that the following cell "hangs" when executed for the first time. 
This is because JAX first traces & compiles the code, the very first time it is run. After the first training step, you should notice that execution is much faster.<jupyter_code>for epoch in tqdm(range(1, num_epochs + 1), desc=f"Epoch ...", position=0, leave=True): rng, input_rng = jax.random.split(rng) # -- Train -- train_batch_idx = generate_batch_splits(len(tokenized_datasets["train"]), total_batch_size, rng=input_rng) with tqdm(total=len(train_batch_idx), desc="Training...", leave=False) as progress_bar_train: for batch_idx in train_batch_idx: model_inputs = data_collator(tokenized_datasets["train"][batch_idx], tokenizer=tokenizer, pad_to_multiple_of=16) # Model forward model_inputs = shard(model_inputs.data) state, train_metric, dropout_rngs = parallel_train_step(state, model_inputs, dropout_rngs) progress_bar_train.update(1) progress_bar_train.write( f"Train... ({epoch}/{num_epochs} | Loss: {round(train_metric['loss'].mean(), 3)}, Learning Rate: {round(train_metric['learning_rate'].mean(), 6)})" ) # -- Eval -- eval_batch_idx = generate_batch_splits(len(tokenized_datasets["validation"]), total_batch_size) eval_metrics = [] with tqdm(total=len(eval_batch_idx), desc="Evaluation...", leave=False) as progress_bar_eval: for batch_idx in eval_batch_idx: model_inputs = data_collator(tokenized_datasets["validation"][batch_idx], tokenizer=tokenizer) # Model forward model_inputs = shard(model_inputs.data) eval_metric = parallel_eval_step(state.params, model_inputs) eval_metrics.append(eval_metric) progress_bar_eval.update(1) eval_metrics_dict = process_eval_metrics(eval_metrics) progress_bar_eval.write( f"Eval... ({epoch}/{num_epochs} | Loss: {eval_metrics_dict['loss']}, Acc: {eval_metrics_dict['accuracy']})" )<jupyter_output><empty_output>
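Once training has finished you will usually want to persist the result. The cell below is a minimal sketch that is not part of the original training loop: it pulls the parameters back from the 8 devices with `flax.jax_utils.unreplicate` and saves the model together with the tokenizer into `model_dir`, so both can later be reloaded with `from_pretrained`.

```python
# a minimal sketch, assuming the training loop above has completed
params = jax.device_get(flax.jax_utils.unreplicate(state.params))

# save the Flax weights together with the model configuration into model_dir
model.save_pretrained(model_dir, params=params)

# save the tokenizer files alongside the model
tokenizer.save_pretrained(model_dir)
```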
notebooks/examples/masked_language_modeling_flax.ipynb/0
{ "file_path": "notebooks/examples/masked_language_modeling_flax.ipynb", "repo_id": "notebooks", "token_count": 10194 }
167
<jupyter_start><jupyter_text>Segment Anything Model using `transformers` 🤗 library This notebook demonstrates how to use the Segment Anything Model (SAM) to segment objects in images. The model has been released by Meta AI in the paper [Segment Anything Model](https://ai.facebook.com/research/publications/segment-anything/). The original source code can be found [here](https://github.com/facebookresearch/segment-anything). This notebook demonstrates how to use `transformers` to leverage the different use cases of the model. The examples are heavily inspired by the [original notebook of the authors](https://github.com/facebookresearch/segment-anything/blob/main/notebooks/predictor_example.ipynb). As stated by that notebook: > The Segment Anything Model (SAM) predicts object masks given prompts that indicate the desired object. The model first converts the image into an image embedding that allows high quality masks to be efficiently produced from a prompt.<jupyter_code>!pip install -q transformers<jupyter_output>Installing build dependencies ... done Getting requirements to build wheel ... done Preparing metadata (pyproject.toml) ... done ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 7.8/7.8 MB 81.8 MB/s eta 0:00:00 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 200.1/200.1 kB 27.2 MB/s eta 0:00:00 Building wheel for transformers (pyproject.toml) ... 
done<jupyter_text>Utility functions Run the cells below to import the needed utility functions for displaying the masks!<jupyter_code>import numpy as np import matplotlib.pyplot as plt def show_mask(mask, ax, random_color=False): if random_color: color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0) else: color = np.array([30/255, 144/255, 255/255, 0.6]) h, w = mask.shape[-2:] mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1) ax.imshow(mask_image) def show_box(box, ax): x0, y0 = box[0], box[1] w, h = box[2] - box[0], box[3] - box[1] ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='green', facecolor=(0,0,0,0), lw=2)) def show_boxes_on_image(raw_image, boxes): plt.figure(figsize=(10,10)) plt.imshow(raw_image) for box in boxes: show_box(box, plt.gca()) plt.axis('on') plt.show() def show_points_on_image(raw_image, input_points, input_labels=None): plt.figure(figsize=(10,10)) plt.imshow(raw_image) input_points = np.array(input_points) if input_labels is None: labels = np.ones_like(input_points[:, 0]) else: labels = np.array(input_labels) show_points(input_points, labels, plt.gca()) plt.axis('on') plt.show() def show_points_and_boxes_on_image(raw_image, boxes, input_points, input_labels=None): plt.figure(figsize=(10,10)) plt.imshow(raw_image) input_points = np.array(input_points) if input_labels is None: labels = np.ones_like(input_points[:, 0]) else: labels = np.array(input_labels) show_points(input_points, labels, plt.gca()) for box in boxes: show_box(box, plt.gca()) plt.axis('on') plt.show() def show_points(coords, labels, ax, marker_size=375): pos_points = coords[labels==1] neg_points = coords[labels==0] ax.scatter(pos_points[:, 0], pos_points[:, 1], color='green', marker='*', s=marker_size, edgecolor='white', linewidth=1.25) ax.scatter(neg_points[:, 0], neg_points[:, 1], color='red', marker='*', s=marker_size, edgecolor='white', linewidth=1.25) def show_masks_on_image(raw_image, masks, scores): if len(masks.shape) == 4: masks = masks.squeeze() if scores.shape[0] == 1: scores = scores.squeeze() nb_predictions = scores.shape[-1] fig, axes = plt.subplots(1, nb_predictions, figsize=(15, 15)) for i, (mask, score) in enumerate(zip(masks, scores)): mask = mask.cpu().detach() axes[i].imshow(np.array(raw_image)) show_mask(mask, axes[i]) axes[i].title.set_text(f"Mask {i+1}, Score: {score.item():.3f}") axes[i].axis("off") plt.show()<jupyter_output><empty_output><jupyter_text>Model loading Use the `from_pretrained` method on the `SamModel` class to load the model from the Hub! For the sake of this demonstration we will use the `vit-huge` checkpoint.<jupyter_code>import torch from transformers import SamModel, SamProcessor device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model = SamModel.from_pretrained("facebook/sam-vit-huge").to(device) processor = SamProcessor.from_pretrained("facebook/sam-vit-huge")<jupyter_output>/home/younes_huggingface_co/miniconda3/envs/fix-test/lib/python3.9/site-packages/tqdm/auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html from .autonotebook import tqdm as notebook_tqdm<jupyter_text>Run predictions Let's dive deeper into how you can run different types of predictions, given different inputs. You will see how to- Generate segmentation masks given a 2D localization- Generate segmentation masks per given localization (one prediction per 2D point)- Generate segmentation masks given a bounding box- Generate segmentation masks given a bounding box and 2D points- Generate multiple segmentation masks per image Load the example image<jupyter_code>from PIL import Image import requests img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png" raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") plt.imshow(raw_image)<jupyter_output><empty_output><jupyter_text>Step 1: Retrieve the image embeddingsIn order to avoid computing the same image embeddings multiple times, we will compute them only once and feed these embeddings directly to the model for faster inference<jupyter_code>inputs = processor(raw_image, return_tensors="pt").to(device) image_embeddings = model.get_image_embeddings(inputs["pixel_values"])<jupyter_output><empty_output><jupyter_text>Usecase 1: Feed a set of 2D points to predict a maskLet's first focus on the classic use case of SAM: you can feed the model a set of 2D points to predict a segmentation mask. The more 2D points you provide, the better the resulting mask will be. In this example, let's try to predict the mask that corresponds to the top left window of the parked car.The input points need to be in the format:`nb_images, nb_predictions, nb_points_per_mask, 2`With SAM you can either predict a single mask given multiple points, or one mask per point. This is denoted by the `nb_predictions` dimension. We will see in the next sections how to perform this type of prediction<jupyter_code>input_points = [[[450, 600]]] show_points_on_image(raw_image, input_points[0])<jupyter_output><empty_output><jupyter_text>For that, simply pass the raw image and the points<jupyter_code>inputs = processor(raw_image, input_points=input_points, return_tensors="pt").to(device) # pop the pixel_values as they are not needed inputs.pop("pixel_values", None) inputs.update({"image_embeddings": image_embeddings}) with torch.no_grad(): outputs = model(**inputs) masks = processor.image_processor.post_process_masks(outputs.pred_masks.cpu(), inputs["original_sizes"].cpu(), inputs["reshaped_input_sizes"].cpu()) scores = outputs.iou_scores show_masks_on_image(raw_image, masks[0], scores)<jupyter_output><empty_output><jupyter_text>As you can see, the predicted masks are sorted by their IoU score. The first mask indeed seems to correspond to the mask of the top right window of the parked car. You can also feed a set of points to predict a single mask. 
Let's try to predict a mask, given two points<jupyter_code>input_points = [[[550, 600], [2100, 1000]]] show_points_on_image(raw_image, input_points) inputs = processor(raw_image, input_points=input_points, return_tensors="pt").to(device) # pop the pixel_values as they are not neded inputs.pop("pixel_values", None) inputs.update({"image_embeddings": image_embeddings}) with torch.no_grad(): outputs = model(**inputs) masks = processor.image_processor.post_process_masks(outputs.pred_masks.cpu(), inputs["original_sizes"].cpu(), inputs["reshaped_input_sizes"].cpu()) scores = outputs.iou_scores show_masks_on_image(raw_image, masks[0], scores)<jupyter_output><empty_output><jupyter_text>Usecase 2: Predict segmentations masks using bounding boxes It is possible to feed bounding boxes to the model to predict segmentation masks of the object of interest in that region.The bounding box needs to be a list of points, corresponding to the flattened coordinates of the top left point, and bottom right point of the bounding box. Let's look at an example below<jupyter_code>input_boxes = [[[650, 900, 1000, 1250]]] show_boxes_on_image(raw_image, input_boxes[0])<jupyter_output><empty_output><jupyter_text>We will try to segment the wheel that is present inside the bounding box! For that just run the following snippet<jupyter_code>inputs = processor(raw_image, input_boxes=[input_boxes], return_tensors="pt").to(device) inputs["input_boxes"].shape inputs.pop("pixel_values", None) inputs.update({"image_embeddings": image_embeddings}) with torch.no_grad(): outputs = model(**inputs) masks = processor.image_processor.post_process_masks(outputs.pred_masks.cpu(), inputs["original_sizes"].cpu(), inputs["reshaped_input_sizes"].cpu()) scores = outputs.iou_scores show_masks_on_image(raw_image, masks[0], scores)<jupyter_output><empty_output><jupyter_text>It is possible to feed multiple boxes, however, this will lead to having one prediction per bounding box. i.e., you cannot combine multiple bounding boxes to get a single prediction. However, you can combine points and bounding boxes to get a prediction, and we will cover that in the next section Usecase 3: Predict segmentation masks given points and bounding boxes<jupyter_code>input_boxes = [[[650, 900, 1000, 1250]]] input_points = [[[820, 1080]]] show_points_and_boxes_on_image(raw_image, input_boxes[0], input_points[0]) inputs = processor(raw_image, input_boxes=[input_boxes], input_points=[input_points], return_tensors="pt").to(device) inputs.pop("pixel_values", None) inputs.update({"image_embeddings": image_embeddings}) with torch.no_grad(): outputs = model(**inputs) masks = processor.image_processor.post_process_masks(outputs.pred_masks.cpu(), inputs["original_sizes"].cpu(), inputs["reshaped_input_sizes"].cpu()) scores = outputs.iou_scores show_masks_on_image(raw_image, masks[0][0], scores[:, 0, :])<jupyter_output><empty_output><jupyter_text>You can also pass points with a label to segment out that region. 
Let us have a deeper look below<jupyter_code>input_boxes = [[[650, 900, 1000, 1250]]] input_points = [[[820, 1080]]] labels = [0] show_points_and_boxes_on_image(raw_image, input_boxes[0], input_points[0], labels) input_boxes = [[[620, 900, 1000, 1255]]] input_points = [[[820, 1080]]] labels = [[0]] inputs = processor(raw_image, input_boxes=[input_boxes], input_points=[input_points], input_labels=[labels], return_tensors="pt").to(device) inputs.pop("pixel_values", None) inputs.update({"image_embeddings": image_embeddings}) with torch.no_grad(): outputs = model(**inputs) masks = processor.image_processor.post_process_masks(outputs.pred_masks.cpu(), inputs["original_sizes"].cpu(), inputs["reshaped_input_sizes"].cpu()) scores = outputs.iou_scores show_masks_on_image(raw_image, masks[0][0], scores[:, 0, :])<jupyter_output><empty_output><jupyter_text>As you can see, the model managed to "ignore" the component that was specified by the point with the label `0`. Usecase 4: Predict multiple masks per imageWith SAM, you can also predict multiple masks per image. You can achieve that in two possible scenarios- Feed multiple points in the `nb_predictions` dimension- Feed multiple bounding boxes to the same image<jupyter_code>input_points = [[[850, 1100], [2250, 1000]]] show_points_on_image(raw_image, input_points)<jupyter_output><empty_output><jupyter_text>Sub-usecase 1: one prediction per point To benefit from what we have described in the first bullet point, just change the input array to<jupyter_code>input_points = [[[850, 1100]], [[2250, 1000]]]<jupyter_output><empty_output><jupyter_text>In order to add the desired dimension, and pass it to the `SamProcessor`<jupyter_code>input_points = [[[[850, 1100]], [[2250, 1000]]]] inputs = processor(raw_image, input_points=input_points, return_tensors="pt").to(device) inputs["input_points"].shape inputs.pop("pixel_values", None) inputs.update({"image_embeddings": image_embeddings}) with torch.no_grad(): outputs = model(**inputs) masks = processor.image_processor.post_process_masks(outputs.pred_masks.cpu(), inputs["original_sizes"].cpu(), inputs["reshaped_input_sizes"].cpu()) scores = outputs.iou_scores<jupyter_output><empty_output><jupyter_text>Let's print the shapes of the output to understand better what is going on<jupyter_code>scores.shape<jupyter_output><empty_output><jupyter_text>Here the first dimension corresponds to the image batch size, the second dimension corresponds to the `nb_predictions` dimension. 
And the last dimension is the number of predicted masks **per prediction** , and it is set to 3 by default according to the official implementation<jupyter_code>show_masks_on_image(raw_image, masks[0][0], scores[:, 0, :]) show_masks_on_image(raw_image, masks[0][1], scores[:, 0, :])<jupyter_output><empty_output><jupyter_text>Sub-usecase 2: Feed multiple bounding boxes to the same image You can also feed multiple bounding boxes to the same image and get one prediction per bounding box.<jupyter_code>input_boxes = [[[650, 900, 1000, 1250], [2050, 800, 2400, 1150]]] show_boxes_on_image(raw_image, input_boxes[0])<jupyter_output><empty_output><jupyter_text>Just pass the input boxes as follows, to match the convention of the processor<jupyter_code>input_boxes = [[[650, 900, 1000, 1250], [2050, 800, 2400, 1150]]] inputs = processor(raw_image, input_boxes=input_boxes, return_tensors="pt").to(device) inputs["input_boxes"].shape<jupyter_output><empty_output><jupyter_text>This time, let's just output a single mask per box, for that we can just pass `multimask_output=False` in the forward pass<jupyter_code>inputs.pop("pixel_values", None) inputs.update({"image_embeddings": image_embeddings}) with torch.no_grad(): outputs = model(**inputs, multimask_output=False) masks = processor.image_processor.post_process_masks(outputs.pred_masks.cpu(), inputs["original_sizes"].cpu(), inputs["reshaped_input_sizes"].cpu()) scores = outputs.iou_scores scores.shape<jupyter_output><empty_output><jupyter_text>As you can see, here we have predicted 2 masks in total! Let's check them now<jupyter_code>show_masks_on_image(raw_image, masks[0], scores)<jupyter_output><empty_output>
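<jupyter_text>Going further: automatic mask generation (editorial addition) All of the use cases above rely on prompts (2D points, boxes, or labels). For a prompt-free "segment everything" workflow, 🤗 `transformers` also ships a `mask-generation` pipeline built on top of the same model and processor. The cell below is a sketch added for illustration and is not part of the original notebook: the pipeline name, the `points_per_batch` argument and the device handling are assumptions that you should check against your installed `transformers` version.<jupyter_code>from transformers import pipeline

# Prompt-free mask generation: the pipeline samples a grid of points over the image
# and post-processes the resulting masks for us.
generator = pipeline("mask-generation", model="facebook/sam-vit-huge", device=0 if torch.cuda.is_available() else -1)
outputs = generator(raw_image, points_per_batch=64)

# Overlay every predicted mask using the `show_mask` helper defined earlier.
plt.figure(figsize=(10, 10))
plt.imshow(np.array(raw_image))
ax = plt.gca()
for mask in outputs["masks"]:
    show_mask(mask, ax=ax, random_color=True)
plt.axis("off")
plt.show()<jupyter_output><empty_output>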
notebooks/examples/segment_anything.ipynb/0
{ "file_path": "notebooks/examples/segment_anything.ipynb", "repo_id": "notebooks", "token_count": 5421 }
168
<jupyter_start><jupyter_text>If you're opening this Notebook on colab, you will probably need to install 🤗 Transformers and 🤗 Datasets. Uncomment the following cell and run it.<jupyter_code>#! pip install datasets transformers seqeval<jupyter_output><empty_output><jupyter_text>If you're opening this notebook locally, make sure your environment has an install from the last version of those libraries.To be able to share your model with the community and generate results like the one shown in the picture below via the inference API, there are a few more steps to follow.First you have to store your authentication token from the Hugging Face website (sign up [here](https://huggingface.co/join) if you haven't already!) then execute the following cell and input your username and password:<jupyter_code>from huggingface_hub import notebook_login notebook_login()<jupyter_output><empty_output><jupyter_text>Then you need to install Git-LFS. Uncomment the following instructions:<jupyter_code># !apt install git-lfs<jupyter_output><empty_output><jupyter_text>Make sure your version of Transformers is at least 4.11.0 since the functionality was introduced in that version:<jupyter_code>import transformers print(transformers.__version__)<jupyter_output><empty_output><jupyter_text>You can find a script version of this notebook to fine-tune your model in a distributed fashion using multiple GPUs or TPUs [here](https://github.com/huggingface/transformers/tree/master/examples/token-classification). We also quickly upload some telemetry - this tells us which examples and software versions are getting used so we know where to prioritize our maintenance efforts. We don't collect (or care about) any personally identifiable information, but if you'd prefer not to be counted, feel free to skip this step or delete this cell entirely.<jupyter_code>from transformers.utils import send_example_telemetry send_example_telemetry("token_classification_notebook", framework="pytorch")<jupyter_output><empty_output><jupyter_text>Fine-tuning a model on a token classification task In this notebook, we will see how to fine-tune one of the [🤗 Transformers](https://github.com/huggingface/transformers) model to a token classification task, which is the task of predicting a label for each token.The most common token classification tasks are:- NER (Named-entity recognition) Classify the entities in the text (person, organization, location...).- POS (Part-of-speech tagging) Grammatically classify the tokens (noun, verb, adjective...)- Chunk (Chunking) Grammatically classify the tokens and group them into "chunks" that go togetherWe will see how to easily load a dataset for these kinds of tasks and use the `Trainer` API to fine-tune a model on it. This notebook is built to run on any token classification task, with any model checkpoint from the [Model Hub](https://huggingface.co/models) as long as that model has a version with a token classification head and a fast tokenizer (check on [this table](https://huggingface.co/transformers/index.htmlbigtable) if this is the case). It might just need some small adjustments if you decide to use a different dataset than the one used here. Depending on you model and the GPU you are using, you might need to adjust the batch size to avoid out-of-memory errors. 
Set those three parameters, then the rest of the notebook should run smoothly:<jupyter_code>task = "ner" # Should be one of "ner", "pos" or "chunk" model_checkpoint = "distilbert-base-uncased" batch_size = 16<jupyter_output><empty_output><jupyter_text>Loading the dataset We will use the [🤗 Datasets](https://github.com/huggingface/datasets) library to download the data and get the metric we need to use for evaluation (to compare our model to the benchmark). This can be easily done with the functions `load_dataset` and `load_metric`.<jupyter_code>from datasets import load_dataset, load_metric<jupyter_output><empty_output><jupyter_text>For our example here, we'll use the [CONLL 2003 dataset](https://www.aclweb.org/anthology/W03-0419.pdf). The notebook should work with any token classification dataset provided by the 🤗 Datasets library. If you're using your own dataset defined from a JSON or csv file (see the [Datasets documentation](https://huggingface.co/docs/datasets/loading_datasets.htmlfrom-local-files) on how to load them), it might need some adjustments in the names of the columns used.<jupyter_code>datasets = load_dataset("conll2003")<jupyter_output>Reusing dataset conll2003 (/home/sgugger/.cache/huggingface/datasets/conll2003/conll2003/1.0.0/63ba56944e35c1943434322a07ceefd79864672041b7834583709af4a5de4664)<jupyter_text>The `datasets` object itself is [`DatasetDict`](https://huggingface.co/docs/datasets/package_reference/main_classes.htmldatasetdict), which contains one key for the training, validation and test set.<jupyter_code>datasets<jupyter_output><empty_output><jupyter_text>We can see the training, validation and test sets all have a column for the tokens (the input texts split into words) and one column of labels for each kind of task we introduced before. To access an actual element, you need to select a split first, then give an index:<jupyter_code>datasets["train"][0]<jupyter_output><empty_output><jupyter_text>The labels are already coded as integer ids to be easily usable by our model, but the correspondence with the actual categories is stored in the `features` of the dataset:<jupyter_code>datasets["train"].features[f"ner_tags"]<jupyter_output><empty_output><jupyter_text>So for the NER tags, 0 corresponds to 'O', 1 to 'B-PER' etc... On top of the 'O' (which means no special entity), there are four labels for NER here, each prefixed with 'B-' (for beginning) or 'I-' (for intermediate), that indicate if the token is the first one for the current group with the label or not:- 'PER' for person- 'ORG' for organization- 'LOC' for location- 'MISC' for miscellaneous Since the labels are lists of `ClassLabel`, the actual names of the labels are nested in the `feature` attribute of the object above:<jupyter_code>label_list = datasets["train"].features[f"{task}_tags"].feature.names label_list<jupyter_output><empty_output><jupyter_text>To get a sense of what the data looks like, the following function will show some examples picked randomly in the dataset (automatically decoding the labels in passing).<jupyter_code>from datasets import ClassLabel, Sequence import random import pandas as pd from IPython.display import display, HTML def show_random_elements(dataset, num_examples=10): assert num_examples <= len(dataset), "Can't pick more elements than there are in the dataset." 
picks = [] for _ in range(num_examples): pick = random.randint(0, len(dataset)-1) while pick in picks: pick = random.randint(0, len(dataset)-1) picks.append(pick) df = pd.DataFrame(dataset[picks]) for column, typ in dataset.features.items(): if isinstance(typ, ClassLabel): df[column] = df[column].transform(lambda i: typ.names[i]) elif isinstance(typ, Sequence) and isinstance(typ.feature, ClassLabel): df[column] = df[column].transform(lambda x: [typ.feature.names[i] for i in x]) display(HTML(df.to_html())) show_random_elements(datasets["train"])<jupyter_output><empty_output><jupyter_text>Preprocessing the data Before we can feed those texts to our model, we need to preprocess them. This is done by a 🤗 Transformers `Tokenizer` which will (as the name indicates) tokenize the inputs (including converting the tokens to their corresponding IDs in the pretrained vocabulary) and put it in a format the model expects, as well as generate the other inputs that the model requires.To do all of this, we instantiate our tokenizer with the `AutoTokenizer.from_pretrained` method, which will ensure:- we get a tokenizer that corresponds to the model architecture we want to use,- we download the vocabulary used when pretraining this specific checkpoint.That vocabulary will be cached, so it's not downloaded again the next time we run the cell.<jupyter_code>from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)<jupyter_output><empty_output><jupyter_text>The following assertion ensures that our tokenizer is a fast tokenizer (backed by Rust) from the 🤗 Tokenizers library. Those fast tokenizers are available for almost all models, and we will need some of the special features they have for our preprocessing.<jupyter_code>import transformers assert isinstance(tokenizer, transformers.PreTrainedTokenizerFast)<jupyter_output><empty_output><jupyter_text>You can check which types of models have a fast tokenizer available and which don't on the [big table of models](https://huggingface.co/transformers/index.html#bigtable). You can directly call this tokenizer on one sentence:<jupyter_code>tokenizer("Hello, this is one sentence!")<jupyter_output><empty_output><jupyter_text>Depending on the model you selected, you will see different keys in the dictionary returned by the cell above. They don't matter much for what we're doing here (just know they are required by the model we will instantiate later); you can learn more about them in [this tutorial](https://huggingface.co/transformers/preprocessing.html) if you're interested.If, as is the case here, your inputs have already been split into words, you should pass the list of words to your tokenizer with the argument `is_split_into_words=True`:<jupyter_code>tokenizer(["Hello", ",", "this", "is", "one", "sentence", "split", "into", "words", "."], is_split_into_words=True)<jupyter_output><empty_output><jupyter_text>Note that transformers are often pretrained with subword tokenizers, meaning that even if your inputs have been split into words already, each of those words could be split again by the tokenizer. 
Let's look at an example of that:<jupyter_code>example = datasets["train"][4] print(example["tokens"]) tokenized_input = tokenizer(example["tokens"], is_split_into_words=True) tokens = tokenizer.convert_ids_to_tokens(tokenized_input["input_ids"]) print(tokens)<jupyter_output>['[CLS]', 'germany', "'", 's', 'representative', 'to', 'the', 'european', 'union', "'", 's', 'veterinary', 'committee', 'werner', 'z', '##wing', '##mann', 'said', 'on', 'wednesday', 'consumers', 'should', 'buy', 'sheep', '##me', '##at', 'from', 'countries', 'other', 'than', 'britain', 'until', 'the', 'scientific', 'advice', 'was', 'clearer', '.', '[SEP]']<jupyter_text>Here the words "Zwingmann" and "sheepmeat" have each been split into three subtokens.This means that we need to do some processing on our labels, as the input ids returned by the tokenizer are longer than the lists of labels our dataset contains, first because some special tokens might be added (we can see a `[CLS]` and a `[SEP]` above) and then because of those possible splits of words into multiple tokens:<jupyter_code>len(example[f"{task}_tags"]), len(tokenized_input["input_ids"])<jupyter_output><empty_output><jupyter_text>Thankfully, the tokenizer returns outputs that have a `word_ids` method which can help us.<jupyter_code>print(tokenized_input.word_ids())<jupyter_output>[None, 0, 1, 1, 2, 3, 4, 5, 6, 7, 7, 8, 9, 10, 11, 11, 11, 12, 13, 14, 15, 16, 17, 18, 18, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, None]<jupyter_text>As we can see, it returns a list with the same number of elements as our processed input ids, mapping special tokens to `None` and all other tokens to their respective word. This way, we can align the labels with the processed input ids.<jupyter_code>word_ids = tokenized_input.word_ids() aligned_labels = [-100 if i is None else example[f"{task}_tags"][i] for i in word_ids] print(len(aligned_labels), len(tokenized_input["input_ids"]))<jupyter_output>39 39<jupyter_text>Here we set the labels of all special tokens to -100 (the index that is ignored by PyTorch) and the labels of all other tokens to the label of the word they come from. Another strategy is to set the label only on the first token obtained from a given word, and give a label of -100 to the other subtokens from the same word. We propose the two strategies here; just change the value of the following flag:<jupyter_code>label_all_tokens = True<jupyter_output><empty_output><jupyter_text>We're now ready to write the function that will preprocess our samples. We feed them to the `tokenizer` with the argument `truncation=True` (to truncate texts that are bigger than the maximum size allowed by the model) and `is_split_into_words=True` (as seen above). Then we align the labels with the token ids using the strategy we picked:<jupyter_code>def tokenize_and_align_labels(examples): tokenized_inputs = tokenizer(examples["tokens"], truncation=True, is_split_into_words=True) labels = [] for i, label in enumerate(examples[f"{task}_tags"]): word_ids = tokenized_inputs.word_ids(batch_index=i) previous_word_idx = None label_ids = [] for word_idx in word_ids: # Special tokens have a word id that is None. We set the label to -100 so they are automatically # ignored in the loss function. if word_idx is None: label_ids.append(-100) # We set the label for the first token of each word. elif word_idx != previous_word_idx: label_ids.append(label[word_idx]) # For the other tokens in a word, we set the label to either the current label or -100, depending on # the label_all_tokens flag. 
else: label_ids.append(label[word_idx] if label_all_tokens else -100) previous_word_idx = word_idx labels.append(label_ids) tokenized_inputs["labels"] = labels return tokenized_inputs<jupyter_output><empty_output><jupyter_text>This function works with one or several examples. In the case of several examples, the tokenizer will return a list of lists for each key:<jupyter_code>tokenize_and_align_labels(datasets['train'][:5])<jupyter_output><empty_output><jupyter_text>To apply this function on all the sentences (or pairs of sentences) in our dataset, we just use the `map` method of our `dataset` object we created earlier. This will apply the function on all the elements of all the splits in `dataset`, so our training, validation and testing data will be preprocessed in one single command.<jupyter_code>tokenized_datasets = datasets.map(tokenize_and_align_labels, batched=True)<jupyter_output>Loading cached processed dataset at /home/sgugger/.cache/huggingface/datasets/conll2003/conll2003/1.0.0/63ba56944e35c1943434322a07ceefd79864672041b7834583709af4a5de4664/cache-bbadd12ccd0f4a48.arrow Loading cached processed dataset at /home/sgugger/.cache/huggingface/datasets/conll2003/conll2003/1.0.0/63ba56944e35c1943434322a07ceefd79864672041b7834583709af4a5de4664/cache-88a1111919934047.arrow Loading cached processed dataset at /home/sgugger/.cache/huggingface/datasets/conll2003/conll2003/1.0.0/63ba56944e35c1943434322a07ceefd79864672041b7834583709af4a5de4664/cache-b231bad83620c1d7.arrow<jupyter_text>Even better, the results are automatically cached by the 🤗 Datasets library to avoid spending time on this step the next time you run your notebook. The 🤗 Datasets library is normally smart enough to detect when the function you pass to map has changed (and thus requires to not use the cache data). For instance, it will properly detect if you change the task in the first cell and rerun the notebook. 🤗 Datasets warns you when it uses cached files, you can pass `load_from_cache_file=False` in the call to `map` to not use the cached files and force the preprocessing to be applied again.Note that we passed `batched=True` to encode the texts by batches together. This is to leverage the full benefit of the fast tokenizer we loaded earlier, which will use multi-threading to treat the texts in a batch concurrently. Fine-tuning the model Now that our data is ready, we can download the pretrained model and fine-tune it. Since all our tasks are about token classification, we use the `AutoModelForTokenClassification` class. Like with the tokenizer, the `from_pretrained` method will download and cache the model for us. The only thing we have to specify is the number of labels for our problem (which we can get from the features, as seen before):<jupyter_code>from transformers import AutoModelForTokenClassification, TrainingArguments, Trainer model = AutoModelForTokenClassification.from_pretrained(model_checkpoint, num_labels=len(label_list))<jupyter_output>Some weights of the model checkpoint at distilbert-base-uncased were not used when initializing DistilBertForTokenClassification: ['vocab_transform.weight', 'vocab_transform.bias', 'vocab_layer_norm.weight', 'vocab_layer_norm.bias', 'vocab_projector.weight', 'vocab_projector.bias'] - This IS expected if you are initializing DistilBertForTokenClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model). 
- This IS NOT expected if you are initializing DistilBertForTokenClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model). Some weights of DistilBertForTokenClassification were not initialized from the model checkpoint at distilbert-base-uncased and are newly initialized: ['classifier.weight', 'classifier.bias'] You should probably TRAIN t[...]<jupyter_text>The warning is telling us we are throwing away some weights (the `vocab_transform` and `vocab_layer_norm` layers) and randomly initializing some other (the `pre_classifier` and `classifier` layers). This is absolutely normal in this case, because we are removing the head used to pretrain the model on a masked language modeling objective and replacing it with a new head for which we don't have pretrained weights, so the library warns us we should fine-tune this model before using it for inference, which is exactly what we are going to do. To instantiate a `Trainer`, we will need to define three more things. The most important is the [`TrainingArguments`](https://huggingface.co/transformers/main_classes/trainer.htmltransformers.TrainingArguments), which is a class that contains all the attributes to customize the training. It requires one folder name, which will be used to save the checkpoints of the model, and all other arguments are optional:<jupyter_code>model_name = model_checkpoint.split("/")[-1] args = TrainingArguments( f"{model_name}-finetuned-{task}", evaluation_strategy = "epoch", learning_rate=2e-5, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, num_train_epochs=3, weight_decay=0.01, push_to_hub=True, )<jupyter_output><empty_output><jupyter_text>Here we set the evaluation to be done at the end of each epoch, tweak the learning rate, use the `batch_size` defined at the top of the notebook and customize the number of epochs for training, as well as the weight decay.The last argument to setup everything so we can push the model to the [Hub](https://huggingface.co/models) regularly during training. Remove it if you didn't follow the installation steps at the top of the notebook. If you want to save your model locally in a name that is different than the name of the repository it will be pushed, or if you want to push your model under an organization and not your name space, use the `hub_model_id` argument to set the repo name (it needs to be the full name, including your namespace: for instance `"sgugger/bert-finetuned-ner"` or `"huggingface/bert-finetuned-ner"`). Then we will need a data collator that will batch our processed examples together while applying padding to make them all the same size (each pad will be padded to the length of its longest example). There is a data collator for this task in the Transformers library, that not only pads the inputs, but also the labels:<jupyter_code>from transformers import DataCollatorForTokenClassification data_collator = DataCollatorForTokenClassification(tokenizer)<jupyter_output><empty_output><jupyter_text>The last thing to define for our `Trainer` is how to compute the metrics from the predictions. 
Here we will load the [`seqeval`](https://github.com/chakki-works/seqeval) metric (which is commonly used to evaluate results on the CONLL dataset) via the Datasets library.<jupyter_code>metric = load_metric("seqeval")<jupyter_output><empty_output><jupyter_text>This metric takes list of labels for the predictions and references:<jupyter_code>labels = [label_list[i] for i in example[f"{task}_tags"]] metric.compute(predictions=[labels], references=[labels])<jupyter_output><empty_output><jupyter_text>So we will need to do a bit of post-processing on our predictions:- select the predicted index (with the maximum logit) for each token- convert it to its string label- ignore everywhere we set a label of -100The following function does all this post-processing on the result of `Trainer.evaluate` (which is a namedtuple containing predictions and labels) before applying the metric:<jupyter_code>import numpy as np def compute_metrics(p): predictions, labels = p predictions = np.argmax(predictions, axis=2) # Remove ignored index (special tokens) true_predictions = [ [label_list[p] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] true_labels = [ [label_list[l] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] results = metric.compute(predictions=true_predictions, references=true_labels) return { "precision": results["overall_precision"], "recall": results["overall_recall"], "f1": results["overall_f1"], "accuracy": results["overall_accuracy"], }<jupyter_output><empty_output><jupyter_text>Note that we drop the precision/recall/f1 computed for each category and only focus on the overall precision/recall/f1/accuracy.Then we just need to pass all of this along with our datasets to the `Trainer`:<jupyter_code>trainer = Trainer( model, args, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["validation"], data_collator=data_collator, tokenizer=tokenizer, compute_metrics=compute_metrics )<jupyter_output><empty_output><jupyter_text>We can now finetune our model by just calling the `train` method:<jupyter_code>trainer.train()<jupyter_output><empty_output><jupyter_text>The `evaluate` method allows you to evaluate again on the evaluation dataset or on another dataset:<jupyter_code>trainer.evaluate()<jupyter_output><empty_output><jupyter_text>To get the precision/recall/f1 computed for each category now that we have finished training, we can apply the same function as before on the result of the `predict` method:<jupyter_code>predictions, labels, _ = trainer.predict(tokenized_datasets["validation"]) predictions = np.argmax(predictions, axis=2) # Remove ignored index (special tokens) true_predictions = [ [label_list[p] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] true_labels = [ [label_list[l] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] results = metric.compute(predictions=true_predictions, references=true_labels) results<jupyter_output><empty_output><jupyter_text>You can now upload the result of the training to the Hub, just execute this instruction:<jupyter_code>trainer.push_to_hub()<jupyter_output><empty_output>
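<jupyter_text>Editorial note: once training has finished (and optionally after `push_to_hub`), a quick way to sanity-check the fine-tuned model is to run it through the `token-classification` pipeline. This is an illustrative sketch rather than part of the original notebook: the model identifier below is a placeholder (use your local output directory or your own `username/repo` on the Hub), and `aggregation_strategy` assumes a reasonably recent version of 🤗 Transformers.<jupyter_code>from transformers import pipeline

token_classifier = pipeline(
    "token-classification",
    model=f"{model_name}-finetuned-{task}",  # or e.g. "your-username/distilbert-base-uncased-finetuned-ner"
    aggregation_strategy="simple",  # merge B-/I- subtoken predictions into whole entities
)
token_classifier("My name is Sylvain and I work at Hugging Face in Brooklyn.")<jupyter_output><empty_output>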
notebooks/examples/token_classification.ipynb/0
{ "file_path": "notebooks/examples/token_classification.ipynb", "repo_id": "notebooks", "token_count": 7204 }
169
import functools import math import os # noqa: F401 from random import choice, randint from time import time import numpy as np import torch import torch.utils.checkpoint as checkpoint from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler from tqdm import tqdm import faiss # noqa: F401 import nlp # noqa: F401 import pandas as pd from elasticsearch import Elasticsearch # noqa: F401 from elasticsearch.helpers import bulk, streaming_bulk # noqa: F401 from transformers import AdamW, AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer, get_linear_schedule_with_warmup pd.set_option("display.max_colwidth", None) ############### # Sparse index ############### def make_es_index_snippets(es_client, passages_dset, index_name="english_wiki_kilt_snippets_100w"): index_config = { "settings": { "number_of_shards": 1, "analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}}, }, "mappings": { "properties": { "article_title": {"type": "text", "analyzer": "standard", "similarity": "BM25"}, "section_title": {"type": "text", "analyzer": "standard", "similarity": "BM25"}, "passage_text": {"type": "text", "analyzer": "standard", "similarity": "BM25"}, } }, } es_client.indices.create(index=index_name, body=index_config) number_of_docs = passages_dset.num_rows progress = tqdm(unit="docs", total=number_of_docs) successes = 0 def passage_generator(): for passage in passages_dset: yield passage # create the ES index for ok, action in streaming_bulk(client=es_client, index=index_name, actions=passage_generator(),): progress.update(1) successes += ok print("Indexed %d documents" % (successes,)) def query_es_index(question, es_client, index_name="english_wiki_kilt_snippets_100w", n_results=10, min_length=20): q = question.lower() banned = ["how", "why", "what", "where", "which", "do", "does", "is", "?", "eli5", "eli5:"] q = " ".join([w for w in q.split() if w not in banned]) response = es_client.search( index=index_name, body={ "query": { "multi_match": { "query": q, "fields": ["article_title", "section_title", "passage_text^2"], "type": "cross_fields", } }, "size": 2 * n_results, }, ) hits = response["hits"]["hits"] support_doc = "<P> " + " <P> ".join([hit["_source"]["passage_text"] for hit in hits]) res_list = [dict([(k, hit["_source"][k]) for k in hit["_source"] if k != "passage_text"]) for hit in hits] for r, hit in zip(res_list, hits): r["passage_id"] = hit["_id"] r["score"] = hit["_score"] r["passage_text"] = hit["_source"]["passage_text"] res_list = [res for res in res_list if len(res["passage_text"].split()) > min_length][:n_results] return support_doc, res_list ############### # ELI5 retriever training ############### class ELI5DatasetQARetriver(Dataset): def __init__(self, examples_array, extra_answer_threshold=3, min_answer_length=64, training=True, n_samples=None): self.data = examples_array self.answer_thres = extra_answer_threshold self.min_length = min_answer_length self.training = training self.n_samples = self.data.num_rows if n_samples is None else n_samples def __len__(self): return self.n_samples def make_example(self, idx): example = self.data[idx] question = example["title"] if self.training: answers = [a for i, (a, sc) in enumerate(zip(example["answers"]["text"], example["answers"]["score"]))] answer_tab = choice(answers).split(" ") start_idx = randint(0, max(0, len(answer_tab) - self.min_length)) answer_span = " ".join(answer_tab[start_idx:]) else: answer_span = example["answers"]["text"][0] return (question, answer_span) def 
__getitem__(self, idx): return self.make_example(idx % self.data.num_rows) class RetrievalQAEmbedder(torch.nn.Module): def __init__(self, sent_encoder, dim): super(RetrievalQAEmbedder, self).__init__() self.sent_encoder = sent_encoder self.output_dim = 128 self.project_q = torch.nn.Linear(dim, self.output_dim, bias=False) self.project_a = torch.nn.Linear(dim, self.output_dim, bias=False) self.ce_loss = torch.nn.CrossEntropyLoss(reduction="mean") def embed_sentences_checkpointed(self, input_ids, attention_mask, checkpoint_batch_size=-1): # reproduces BERT forward pass with checkpointing if checkpoint_batch_size < 0 or input_ids.shape[0] < checkpoint_batch_size: return self.sent_encoder(input_ids, attention_mask=attention_mask)[1] else: # prepare implicit variables device = input_ids.device input_shape = input_ids.size() token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) head_mask = [None] * self.sent_encoder.config.num_hidden_layers extended_attention_mask: torch.Tensor = self.sent_encoder.get_extended_attention_mask( attention_mask, input_shape, device ) # define function for checkpointing def partial_encode(*inputs): encoder_outputs = self.sent_encoder.encoder(inputs[0], attention_mask=inputs[1], head_mask=head_mask,) sequence_output = encoder_outputs[0] pooled_output = self.sent_encoder.pooler(sequence_output) return pooled_output # run embedding layer on everything at once embedding_output = self.sent_encoder.embeddings( input_ids=input_ids, position_ids=None, token_type_ids=token_type_ids, inputs_embeds=None ) # run encoding and pooling on one mini-batch at a time pooled_output_list = [] for b in range(math.ceil(input_ids.shape[0] / checkpoint_batch_size)): b_embedding_output = embedding_output[b * checkpoint_batch_size : (b + 1) * checkpoint_batch_size] b_attention_mask = extended_attention_mask[b * checkpoint_batch_size : (b + 1) * checkpoint_batch_size] pooled_output = checkpoint.checkpoint(partial_encode, b_embedding_output, b_attention_mask) pooled_output_list.append(pooled_output) return torch.cat(pooled_output_list, dim=0) def embed_questions(self, q_ids, q_mask, checkpoint_batch_size=-1): q_reps = self.embed_sentences_checkpointed(q_ids, q_mask, checkpoint_batch_size) return self.project_q(q_reps) def embed_answers(self, a_ids, a_mask, checkpoint_batch_size=-1): a_reps = self.embed_sentences_checkpointed(a_ids, a_mask, checkpoint_batch_size) return self.project_a(a_reps) def forward(self, q_ids, q_mask, a_ids, a_mask, checkpoint_batch_size=-1): device = q_ids.device q_reps = self.embed_questions(q_ids, q_mask, checkpoint_batch_size) a_reps = self.embed_answers(a_ids, a_mask, checkpoint_batch_size) compare_scores = torch.mm(q_reps, a_reps.t()) loss_qa = self.ce_loss(compare_scores, torch.arange(compare_scores.shape[1]).to(device)) loss_aq = self.ce_loss(compare_scores.t(), torch.arange(compare_scores.shape[0]).to(device)) loss = (loss_qa + loss_aq) / 2 return loss def make_qa_retriever_model(model_name="google/bert_uncased_L-8_H-512_A-8", from_file=None, device="cuda:0"): tokenizer = AutoTokenizer.from_pretrained(model_name) bert_model = AutoModel.from_pretrained(model_name).to(device) # run bert_model on a dummy batch to get output dimension d_ids = torch.LongTensor( [[bert_model.config.bos_token_id if bert_model.config.bos_token_id is not None else 1]] ).to(device) d_mask = torch.LongTensor([[1]]).to(device) sent_dim = bert_model(d_ids, attention_mask=d_mask)[1].shape[-1] qa_embedder = RetrievalQAEmbedder(bert_model, sent_dim).to(device) if from_file is 
not None: param_dict = torch.load(from_file) # has model weights, optimizer, and scheduler states qa_embedder.load_state_dict(param_dict["model"]) return tokenizer, qa_embedder def make_qa_retriever_batch(qa_list, tokenizer, max_len=64, device="cuda:0"): q_ls = [q for q, a in qa_list] a_ls = [a for q, a in qa_list] q_toks = tokenizer.batch_encode_plus(q_ls, max_length=max_len, pad_to_max_length=True) q_ids, q_mask = ( torch.LongTensor(q_toks["input_ids"]).to(device), torch.LongTensor(q_toks["attention_mask"]).to(device), ) a_toks = tokenizer.batch_encode_plus(a_ls, max_length=max_len, pad_to_max_length=True) a_ids, a_mask = ( torch.LongTensor(a_toks["input_ids"]).to(device), torch.LongTensor(a_toks["attention_mask"]).to(device), ) return (q_ids, q_mask, a_ids, a_mask) def train_qa_retriever_epoch(model, dataset, tokenizer, optimizer, scheduler, args, e=0): model.train() # make iterator train_sampler = RandomSampler(dataset) model_collate_fn = functools.partial( make_qa_retriever_batch, tokenizer=tokenizer, max_len=args.max_length, device="cuda:0" ) data_loader = DataLoader(dataset, batch_size=args.batch_size, sampler=train_sampler, collate_fn=model_collate_fn) epoch_iterator = tqdm(data_loader, desc="Iteration", disable=True) # accumulate loss since last print loc_steps = 0 loc_loss = 0.0 st_time = time() for step, batch in enumerate(epoch_iterator): q_ids, q_mask, a_ids, a_mask = batch pre_loss = model(q_ids, q_mask, a_ids, a_mask, checkpoint_batch_size=args.checkpoint_batch_size) loss = pre_loss.sum() # optimizer loss.backward() optimizer.step() scheduler.step() model.zero_grad() # some printing within the epoch loc_loss += loss.item() loc_steps += 1 if step % args.print_freq == 0 or step == 1: print( "{:2d} {:5d} of {:5d} \t L: {:.3f} \t -- {:.3f}".format( e, step, len(dataset) // args.batch_size, loc_loss / loc_steps, time() - st_time, ) ) loc_loss = 0 loc_steps = 0 def train_qa_retriever_joint_epoch(model, dataset_list, tokenizer, optimizer, scheduler, args, e=0): model.train() model_collate_fn = functools.partial( make_qa_retriever_batch, tokenizer=tokenizer, max_len=args.max_length, device="cuda:0" ) # make iterator train_samplers = [RandomSampler(dataset) for dataset in dataset_list] data_loaders = [ DataLoader(dataset, batch_size=args.batch_size, sampler=train_sampler, collate_fn=model_collate_fn) for dataset, train_sampler in zip(dataset_list, train_samplers) ] iterators = [iter(dloader) for dloader in data_loaders] joint_iter = zip(*iterators) # accumulate loss since last print loc_steps = 0 loc_loss = 0.0 st_time = time() for step, (batches,) in enumerate(zip(joint_iter)): for batch in batches: q_ids, q_mask, a_ids, a_mask = batch loss = model(q_ids, q_mask, a_ids, a_mask, checkpoint_batch_size=args.checkpoint_batch_size) # optimizer loss.backward() optimizer.step() scheduler.step() model.zero_grad() # some printing within the epoch loc_loss += loss.item() loc_steps += 1 if step % args.print_freq == 0: print( "{:2d} {:5d} of {:5d} \t L: {:.3f} \t -- {:.3f}".format( e, step, len(dataset_list[0]) // args.batch_size, loc_loss / loc_steps, time() - st_time, ) ) loc_loss = 0 loc_steps = 0 def evaluate_qa_retriever(model, dataset, tokenizer, args): model.eval() # make iterator eval_sampler = SequentialSampler(dataset) model_collate_fn = functools.partial( make_qa_retriever_batch, tokenizer=tokenizer, max_len=args.max_length, device="cuda:0" ) data_loader = DataLoader(dataset, batch_size=args.batch_size, sampler=eval_sampler, collate_fn=model_collate_fn) epoch_iterator = 
tqdm(data_loader, desc="Iteration", disable=True) tot_loss = 0.0 with torch.no_grad(): for step, batch in enumerate(epoch_iterator): q_ids, q_mask, a_ids, a_mask = batch loss = model(q_ids, q_mask, a_ids, a_mask) tot_loss += loss.item() return tot_loss / (step + 1) def train_qa_retriever(qar_model, qar_tokenizer, qar_train_dset, qar_valid_dset, qar_args): qar_optimizer = AdamW(qar_model.parameters(), lr=qar_args.learning_rate, eps=1e-8) qar_scheduler = get_linear_schedule_with_warmup( qar_optimizer, num_warmup_steps=100, num_training_steps=(qar_args.num_epochs + 1) * math.ceil(len(qar_train_dset) / qar_args.batch_size), ) for e in range(qar_args.num_epochs): train_qa_retriever_epoch(qar_model, qar_train_dset, qar_tokenizer, qar_optimizer, qar_scheduler, qar_args, e) m_save_dict = { "model": qar_model.state_dict(), "optimizer": qar_optimizer.state_dict(), "scheduler": qar_scheduler.state_dict(), } print("Saving model {}".format(qar_args.model_save_name)) torch.save(m_save_dict, "{}_{}.pth".format(qar_args.model_save_name, e)) eval_loss = evaluate_qa_retriever(qar_model, qar_valid_dset, qar_tokenizer, qar_args) print("Evaluation loss epoch {:4d}: {:.3f}".format(e, eval_loss)) ############### # ELI5 seq2seq model training ############### class ELI5DatasetS2S(Dataset): def __init__( self, examples_array, make_doc_fun=None, extra_answer_threshold=3, document_cache=None, training=True ): self.training = training self.data = examples_array self.make_doc_function = make_doc_fun self.document_cache = {} if document_cache is None else document_cache assert not (make_doc_fun is None and document_cache is None) # make index of specific question-answer pairs from multi-answers if self.training: self.qa_id_list = [ (i, j) for i, qa in enumerate(self.data) for j, (a, sc) in enumerate(zip(qa["answers"]["text"], qa["answers"]["score"])) if j == 0 or sc >= extra_answer_threshold ] else: self.qa_id_list = [(i, 0) for i in range(self.data.num_rows)] def __len__(self): return len(self.qa_id_list) def make_example(self, idx): i, j = self.qa_id_list[idx] example = self.data[i] question = example["title"] + " " + example["selftext"] answer = example["answers"]["text"][j] q_id = example["q_id"] if self.make_doc_function is not None: self.document_cache[q_id] = self.document_cache.get(q_id, self.make_doc_function(example["title"])) document = self.document_cache[q_id] in_st = "question: {} context: {}".format( question.lower().replace(" --t--", "").strip(), document.lower().strip(), ) out_st = answer return (in_st, out_st) def __getitem__(self, idx): return self.make_example(idx) def make_qa_s2s_model(model_name="facebook/bart-large", from_file=None, device="cuda:0"): tokenizer = AutoTokenizer.from_pretrained(model_name) model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device) if from_file is not None: param_dict = torch.load(from_file) # has model weights, optimizer, and scheduler states model.load_state_dict(param_dict["model"]) return tokenizer, model def make_qa_s2s_batch(qa_list, tokenizer, max_len=64, max_a_len=360, device="cuda:0"): q_ls = [q for q, a in qa_list] a_ls = [a for q, a in qa_list] q_toks = tokenizer.batch_encode_plus(q_ls, max_length=max_len, pad_to_max_length=True) q_ids, q_mask = ( torch.LongTensor(q_toks["input_ids"]).to(device), torch.LongTensor(q_toks["attention_mask"]).to(device), ) a_toks = tokenizer.batch_encode_plus(a_ls, max_length=min(max_len, max_a_len), pad_to_max_length=True) a_ids, a_mask = ( torch.LongTensor(a_toks["input_ids"]).to(device), 
torch.LongTensor(a_toks["attention_mask"]).to(device), ) lm_labels = a_ids[:, 1:].contiguous().clone() lm_labels[a_mask[:, 1:].contiguous() == 0] = -100 model_inputs = { "input_ids": q_ids, "attention_mask": q_mask, "decoder_input_ids": a_ids[:, :-1].contiguous(), "lm_labels": lm_labels, } return model_inputs def train_qa_s2s_epoch(model, dataset, tokenizer, optimizer, scheduler, args, e=0, curriculum=False): model.train() # make iterator if curriculum: train_sampler = SequentialSampler(dataset) else: train_sampler = RandomSampler(dataset) model_collate_fn = functools.partial( make_qa_s2s_batch, tokenizer=tokenizer, max_len=args.max_length, device="cuda:0" ) data_loader = DataLoader(dataset, batch_size=args.batch_size, sampler=train_sampler, collate_fn=model_collate_fn) epoch_iterator = tqdm(data_loader, desc="Iteration", disable=True) # accumulate loss since last print loc_steps = 0 loc_loss = 0.0 st_time = time() for step, batch_inputs in enumerate(epoch_iterator): pre_loss = model(**batch_inputs)[0] loss = pre_loss.sum() / pre_loss.shape[0] loss.backward() # optimizer if step % args.backward_freq == 0: optimizer.step() scheduler.step() model.zero_grad() # some printing within the epoch loc_loss += loss.item() loc_steps += 1 if step % args.print_freq == 0 or step == 1: print( "{:2d} {:5d} of {:5d} \t L: {:.3f} \t -- {:.3f}".format( e, step, len(dataset) // args.batch_size, loc_loss / loc_steps, time() - st_time, ) ) loc_loss = 0 loc_steps = 0 def eval_qa_s2s_epoch(model, dataset, tokenizer, args): model.eval() # make iterator train_sampler = SequentialSampler(dataset) model_collate_fn = functools.partial( make_qa_s2s_batch, tokenizer=tokenizer, max_len=args.max_length, device="cuda:0" ) data_loader = DataLoader(dataset, batch_size=args.batch_size, sampler=train_sampler, collate_fn=model_collate_fn) epoch_iterator = tqdm(data_loader, desc="Iteration", disable=True) # accumulate loss since last print loc_steps = 0 loc_loss = 0.0 st_time = time() with torch.no_grad(): for step, batch_inputs in enumerate(epoch_iterator): pre_loss = model(**batch_inputs)[0] loss = pre_loss.sum() / pre_loss.shape[0] loc_loss += loss.item() loc_steps += 1 if step % args.print_freq == 0: print( "{:5d} of {:5d} \t L: {:.3f} \t -- {:.3f}".format( step, len(dataset) // args.batch_size, loc_loss / loc_steps, time() - st_time, ) ) print("Total \t L: {:.3f} \t -- {:.3f}".format(loc_loss / loc_steps, time() - st_time,)) def train_qa_s2s(qa_s2s_model, qa_s2s_tokenizer, s2s_train_dset, s2s_valid_dset, s2s_args): s2s_optimizer = AdamW(qa_s2s_model.parameters(), lr=s2s_args.learning_rate, eps=1e-8) s2s_scheduler = get_linear_schedule_with_warmup( s2s_optimizer, num_warmup_steps=400, num_training_steps=(s2s_args.num_epochs + 1) * math.ceil(len(s2s_train_dset) / s2s_args.batch_size), ) for e in range(s2s_args.num_epochs): train_qa_s2s_epoch( qa_s2s_model, s2s_train_dset, qa_s2s_tokenizer, s2s_optimizer, s2s_scheduler, s2s_args, e, curriculum=(e == 0), ) m_save_dict = { "model": qa_s2s_model.state_dict(), "optimizer": s2s_optimizer.state_dict(), "scheduler": s2s_scheduler.state_dict(), } print("Saving model {}".format(s2s_args.model_save_name)) eval_qa_s2s_epoch(qa_s2s_model, s2s_valid_dset, qa_s2s_tokenizer, s2s_args) torch.save(m_save_dict, "{}_{}.pth".format(s2s_args.model_save_name, e)) # generate answer from input "question: ... context: <p> ..." 
def qa_s2s_generate( question_doc, qa_s2s_model, qa_s2s_tokenizer, num_answers=1, num_beams=None, min_len=64, max_len=256, do_sample=False, temp=1.0, top_p=None, top_k=None, max_input_length=512, device="cuda:0", ): model_inputs = make_qa_s2s_batch([(question_doc, "A")], qa_s2s_tokenizer, max_input_length, device=device,) n_beams = num_answers if num_beams is None else max(num_beams, num_answers) generated_ids = qa_s2s_model.generate( input_ids=model_inputs["input_ids"], attention_mask=model_inputs["attention_mask"], min_length=min_len, max_length=max_len, do_sample=do_sample, early_stopping=True, num_beams=1 if do_sample else n_beams, temperature=temp, top_k=top_k, top_p=top_p, eos_token_id=qa_s2s_tokenizer.eos_token_id, no_repeat_ngram_size=3, num_return_sequences=num_answers, decoder_start_token_id=qa_s2s_tokenizer.bos_token_id, ) return [qa_s2s_tokenizer.decode(ans_ids, skip_special_tokens=True).strip() for ans_ids in generated_ids] ############### # ELI5-trained retrieval model usage ############### def embed_passages_for_retrieval(passages, tokenizer, qa_embedder, max_length=128, device="cuda:0"): a_toks = tokenizer.batch_encode_plus(passages, max_length=max_length, pad_to_max_length=True) a_ids, a_mask = ( torch.LongTensor(a_toks["input_ids"]).to(device), torch.LongTensor(a_toks["attention_mask"]).to(device), ) with torch.no_grad(): a_reps = qa_embedder.embed_answers(a_ids, a_mask).cpu().type(torch.float) return a_reps.numpy() def embed_questions_for_retrieval(q_ls, tokenizer, qa_embedder, device="cuda:0"): q_toks = tokenizer.batch_encode_plus(q_ls, max_length=128, pad_to_max_length=True) q_ids, q_mask = ( torch.LongTensor(q_toks["input_ids"]).to(device), torch.LongTensor(q_toks["attention_mask"]).to(device), ) with torch.no_grad(): q_reps = qa_embedder.embed_questions(q_ids, q_mask).cpu().type(torch.float) return q_reps.numpy() def make_qa_dense_index( qa_embedder, tokenizer, passages_dset, batch_size=512, max_length=128, index_name="kilt_passages_reps.dat", dtype="float32", device="cuda:0", ): st_time = time() fp = np.memmap(index_name, dtype=dtype, mode="w+", shape=(passages_dset.num_rows, 128)) n_batches = math.ceil(passages_dset.num_rows / batch_size) for i in range(n_batches): passages = [p for p in passages_dset[i * batch_size : (i + 1) * batch_size]["passage_text"]] reps = embed_passages_for_retrieval(passages, tokenizer, qa_embedder, max_length, device) fp[i * batch_size : (i + 1) * batch_size] = reps if i % 50 == 0: print(i, time() - st_time) def evaluate_retriever(qa_list, retriever_func, scoring_func, n_ret=10, verbose=False): total_retriever_time = 0.0 total_retriever_score = 0.0 st_time = time() for i, (question, answer) in enumerate(qa_list): r_time = time() retrieved_passages = retriever_func(question, n_ret) total_retriever_time += time() - r_time total_retriever_score += scoring_func(retrieved_passages, answer) if verbose and ((i + 1) % 500 == 0 or i <= 1): print( "{:03d}: S-{:.4f} T-{:.4f} | {:.2f}".format( i + 1, total_retriever_score / (i + 1), total_retriever_time / (i + 1), time() - st_time ) ) return {"idf_recall": total_retriever_score / (i + 1), "retrieval_time": total_retriever_time / (i + 1)} # build a support document for the question out of Wikipedia snippets def query_qa_dense_index( question, qa_embedder, tokenizer, wiki_passages, wiki_index, n_results=10, min_length=20, device="cuda:0" ): q_rep = embed_questions_for_retrieval([question], tokenizer, qa_embedder, device=device) D, I = wiki_index.search(q_rep, 2 * n_results) res_passages = 
[wiki_passages[int(i)] for i in I[0]] support_doc = "<P> " + " <P> ".join([p["passage_text"] for p in res_passages]) res_list = [dict([(k, p[k]) for k in wiki_passages.column_names]) for p in res_passages] res_list = [res for res in res_list if len(res["passage_text"].split()) > min_length][:n_results] for r, sc in zip(res_list, D[0]): r["score"] = float(sc) return support_doc, res_list def batch_query_qa_dense_index(questions, qa_embedder, tokenizer, wiki_passages, wiki_index, n_results=10): q_rep = embed_questions_for_retrieval(questions, tokenizer, qa_embedder) D, I = wiki_index.search(q_rep, n_results) res_passages_lst = [[wiki_passages[int(i)] for i in i_lst] for i_lst in I] support_doc_lst = [ "<P> " + " <P> ".join([p["passage_text"] for p in res_passages]) for res_passages in res_passages_lst ] all_res_lists = [] for (res_passages, dl) in zip(res_passages_lst, D): res_list = [dict([(k, p[k]) for k in wiki_passages.column_names]) for p in res_passages] for r, sc in zip(res_list, dl): r["score"] = float(sc) all_res_lists += [res_list[:]] return support_doc_lst, all_res_lists # find nearest neighbors of an answer or declarative text in Wikipedia snippets def query_qa_dense_index_nn(passage, qa_embedder, tokenizer, wiki_passages, wiki_index, n_results=10, min_length=20): a_rep = embed_passages_for_retrieval([passage], tokenizer, qa_embedder) D, I = wiki_index.search(a_rep, 2 * n_results) res_passages = [wiki_passages[int(i)] for i in I[0]] support_doc = "<P> " + " <P> ".join([p["passage_text"] for p in res_passages]) res_list = [dict([(k, p[k]) for k in wiki_passages.column_names]) for p in res_passages] res_list = [res for res in res_list if len(res["passage_text"].split()) > min_length][:n_results] for r, sc, i in zip(res_list, D[0], I[0]): r["passage_id"] = int(i) r["score"] = float(sc) return support_doc, res_list def batch_query_qa_dense_index_nn(passages, qa_embedder, tokenizer, wiki_passages, wiki_index, n_results=10): a_reps = embed_passages_for_retrieval(passages, tokenizer, qa_embedder) D, I = wiki_index.search(a_reps, n_results) res_passages_lst = [[wiki_passages[int(i)] for i in i_lst] for i_lst in I] support_doc_lst = [ "<P> " + " <P> ".join([p["passage_text"] for p in res_passages]) for res_passages in res_passages_lst ] all_res_lists = [] for (res_passages, dl, il) in zip(res_passages_lst, D, I): res_list = [dict([(k, p[k]) for k in wiki_passages.column_names]) for p in res_passages] for r, sc, i in zip(res_list, dl, il): r["passage_id"] = int(i) r["score"] = float(sc) all_res_lists += [res_list[:]] return support_doc_lst, all_res_lists
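# ---------------------------------------------------------------------------
# Example usage (editorial sketch, not part of the original module).
# The names below are assumptions: `wiki_snippets` must be a passages dataset with a
# "passage_text" column (e.g. loaded with `nlp.load_dataset`), `wiki_index` a faiss index
# built over the memmap written by `make_qa_dense_index`, and the `.pth` files checkpoints
# produced by `train_qa_retriever` / `train_qa_s2s`.
#
# qar_tokenizer, qar_model = make_qa_retriever_model(
#     model_name="google/bert_uncased_L-8_H-512_A-8", from_file="retriever_models_0.pth"
# )
# s2s_tokenizer, s2s_model = make_qa_s2s_model(
#     model_name="facebook/bart-large", from_file="seq2seq_models_0.pth"
# )
# question = "Why does water expand when it freezes?"
# doc, _ = query_qa_dense_index(question, qar_model, qar_tokenizer, wiki_snippets, wiki_index)
# answer = qa_s2s_generate(
#     "question: {} context: {}".format(question, doc), s2s_model, s2s_tokenizer, num_beams=4
# )[0]
# print(answer)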
notebooks/longform-qa/lfqa_utils.py/0
{ "file_path": "notebooks/longform-qa/lfqa_utils.py", "repo_id": "notebooks", "token_count": 12846 }
170
<jupyter_start><jupyter_text>Huggingface Sagemaker-sdk - Distributed Training Demo Distributed Question Answering with `transformers` scripts + `Trainer` and `squad` dataset 1. [Introduction](Introduction) 2. [Development Environment and Permissions](Development-Environment-and-Permissions) 1. [Installation](Installation) 2. [Development environment](Development-environment) 3. [Permissions](Permissions) 4. [Fine-tuning & starting Sagemaker Training Job](Fine-tuning-\&-starting-Sagemaker-Training-Job) 1. [Creating an Estimator and start a training job](Creating-an-Estimator-and-start-a-training-job) 2. [Estimator Parameters](Estimator-Parameters) 3. [Download fine-tuned model from s3](Download-fine-tuned-model-from-s3) 3. [Attach to old training job to an estimator ](Attach-to-old-training-job-to-an-estimator) 5. [_Coming soon_:Push model to the Hugging Face hub](Push-model-to-the-Hugging-Face-hub) IntroductionWelcome to our end-to-end `distributed` Question-Answering example. In this demo, we will use the Hugging Face `transformers` and `datasets` library together with a custom Amazon sagemaker-sdk extension to fine-tune a pre-trained transformer for question-answering on multiple-gpus. In particular, the pre-trained model will be fine-tuned using the `squad` dataset. The demo will use the new `smdistributed` library to run training on multiple gpus as training scripting we are going to use one of the `transformers` [example scripts from the repository](https://github.com/huggingface/transformers/blob/master/examples/question-answering/run_qa.py).To get started, we need to set up the environment with a few prerequisite steps, for permissions, configurations, and so on. _**NOTE: You can run this demo in Sagemaker Studio, your local machine or Sagemaker Notebook Instances**_ Development Environment and Permissions Installation_*Note:* we only install the required libraries from Hugging Face and AWS. You also need PyTorch or Tensorflow, if you haven´t it installed_<jupyter_code>!pip install "sagemaker>=2.48.0" --upgrade<jupyter_output><empty_output><jupyter_text>Development environment<jupyter_code>import sagemaker.huggingface<jupyter_output><empty_output><jupyter_text>Permissions _If you are going to use Sagemaker in a local environment. You need access to an IAM Role with the required permissions for Sagemaker. You can find [here](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html) more about it._<jupyter_code>import sagemaker import boto3 sess = sagemaker.Session() # sagemaker session bucket -> used for uploading data, models and logs # sagemaker will automatically create this bucket if it not exists sagemaker_session_bucket=None if sagemaker_session_bucket is None and sess is not None: # set to default bucket if a bucket name is not given sagemaker_session_bucket = sess.default_bucket() try: role = sagemaker.get_execution_role() except ValueError: iam = boto3.client('iam') role = iam.get_role(RoleName='sagemaker_execution_role')['Role']['Arn'] sess = sagemaker.Session(default_bucket=sagemaker_session_bucket) print(f"sagemaker role arn: {role}") print(f"sagemaker bucket: {sess.default_bucket()}") print(f"sagemaker session region: {sess.boto_region_name}")<jupyter_output><empty_output><jupyter_text>Fine-tuning & starting Sagemaker Training JobIn order to create a sagemaker training job we need an `HuggingFace` Estimator. The Estimator handles end-to-end Amazon SageMaker training and deployment tasks. 
In the Estimator we define which fine-tuning script should be used as `entry_point`, which `instance_type` should be used, which `hyperparameters` are passed in, and so on: ```python huggingface_estimator = HuggingFace(entry_point='train.py', source_dir='./scripts', sagemaker_session=sess, base_job_name='huggingface-sdk-extension', instance_type='ml.p3.2xlarge', instance_count=1, transformers_version='4.4', pytorch_version='1.6.0', py_version='py36', role=role, hyperparameters = {'epochs': 1, 'train_batch_size': 32, 'model_name':'distilbert-base-uncased' })``` When we create a SageMaker training job, SageMaker takes care of starting and managing all the required EC2 instances for us with the `huggingface` container, uploads the provided fine-tuning script `train.py`, and downloads the data from our `sagemaker_session_bucket` into the container at `/opt/ml/input/data`. Then it starts the training job by running: ```python /opt/conda/bin/python train.py --epochs 1 --model_name distilbert-base-uncased --train_batch_size 32``` The `hyperparameters` you define in the `HuggingFace` estimator are passed in as named arguments. SageMaker provides useful properties about the training environment through various environment variables, including the following:* `SM_MODEL_DIR`: A string that represents the path where the training job writes the model artifacts to. After training, artifacts in this directory are uploaded to S3 for model hosting.* `SM_NUM_GPUS`: An integer representing the number of GPUs available to the host.* `SM_CHANNEL_XXXX`: A string that represents the path to the directory that contains the input data for the specified channel. For example, if you specify two input channels in the HuggingFace estimator’s fit call, named `train` and `test`, the environment variables `SM_CHANNEL_TRAIN` and `SM_CHANNEL_TEST` are set.To run your training job locally you can define `instance_type='local'` or `instance_type='local_gpu'` for GPU usage. _Note: this does not work within SageMaker Studio._ Creating an Estimator and starting a training jobIn this example we are going to use the capability to download and use a fine-tuning script from a `git` repository. We are using the `run_qa.py` script from the `transformers` examples. 
You can find the code [here](https://github.com/huggingface/transformers/tree/master/examples/question-answering).<jupyter_code>from sagemaker.huggingface import HuggingFace # hyperparameters, which are passed into the training job hyperparameters={ 'model_name_or_path': 'bert-large-uncased-whole-word-masking', 'dataset_name':'squad', 'do_train': True, 'do_eval': True, 'fp16': True, 'per_device_train_batch_size': 4, 'per_device_eval_batch_size': 4, 'num_train_epochs': 2, 'max_seq_length': 384, 'max_steps': 100, 'pad_to_max_length': True, 'doc_stride': 128, 'output_dir': '/opt/ml/model' } # configuration for running training on smdistributed Data Parallel distribution = {'smdistributed':{'dataparallel':{ 'enabled': True }}} # git configuration to download our fine-tuning script git_config = {'repo': 'https://github.com/huggingface/transformers.git','branch': 'v4.26.0'} # instance configurations instance_type='ml.p3.16xlarge' instance_count=2 volume_size=200 # metric definition to extract the results metric_definitions=[ {"Name": "train_runtime", "Regex": "train_runtime.*=\D*(.*?)$"}, {'Name': 'train_samples_per_second', 'Regex': "train_samples_per_second.*=\D*(.*?)$"}, {'Name': 'epoch', 'Regex': "epoch.*=\D*(.*?)$"}, {'Name': 'f1', 'Regex': "f1.*=\D*(.*?)$"}, {'Name': 'exact_match', 'Regex': "exact_match.*=\D*(.*?)$"}] # estimator huggingface_estimator = HuggingFace(entry_point='run_qa.py', source_dir='./examples/pytorch/question-answering', git_config=git_config, metric_definitions=metric_definitions, instance_type=instance_type, instance_count=instance_count, volume_size=volume_size, role=role, transformers_version='4.26.0', pytorch_version='1.13.1', py_version='py39', distribution= distribution, hyperparameters = hyperparameters) # starting the train job huggingface_estimator.fit()<jupyter_output><empty_output><jupyter_text>Deploying the endpointTo deploy our endpoint, we call `deploy()` on our HuggingFace estimator object, passing in our desired number of instances and instance type.<jupyter_code>predictor = huggingface_estimator.deploy(1,"ml.g4dn.xlarge")<jupyter_output><empty_output><jupyter_text>Then, we use the returned predictor object to call the endpoint.<jupyter_code>data = { "inputs": { "question": "What is used for inference?", "context": "My Name is Philipp and I live in Nuremberg. This model is used with sagemaker for inference." } } predictor.predict(data)<jupyter_output><empty_output><jupyter_text>Finally, we delete the endpoint again.<jupyter_code>predictor.delete_model() predictor.delete_endpoint()<jupyter_output><empty_output>
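The table of contents above also mentions attaching to an old training job and downloading the fine-tuned model from S3; since those cells are not shown here, the following is a hedged sketch of how that typically looks with the SageMaker Python SDK. The training job name is a placeholder you would copy from the SageMaker console.

```python
from sagemaker.estimator import Estimator
from sagemaker.s3 import S3Downloader

# placeholder: name of a completed training job from the SageMaker console
old_training_job_name = "huggingface-pytorch-training-YYYY-MM-DD-HH-MM-SS-XXX"

# re-attach an estimator to the completed job
attached_estimator = Estimator.attach(old_training_job_name, sagemaker_session=sess)

# download the model artifact (model.tar.gz) that the job uploaded to S3
S3Downloader.download(
    s3_uri=attached_estimator.model_data,  # S3 URI of the trained model
    local_path=".",                        # local directory for model.tar.gz
    sagemaker_session=sess,
)
```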
notebooks/sagemaker/03_distributed_training_data_parallelism/sagemaker-notebook.ipynb/0
{ "file_path": "notebooks/sagemaker/03_distributed_training_data_parallelism/sagemaker-notebook.ipynb", "repo_id": "notebooks", "token_count": 3514 }
171
accelerate launch --config_file accelerate_config.yaml run_seq2seq_no_trainer.py \ --dataset_name "smangrul/MuDoConv" \ --max_source_length 128 \ --source_prefix "chatbot: " \ --max_target_length 64 \ --val_max_target_length 64 \ --val_min_target_length 20 \ --n_val_batch_generations 5 \ --n_train 10000 \ --n_val 1000 \ --pad_to_max_length True\ --num_beams 10 \ --model_name_or_path "facebook/blenderbot-400M-distill" \ --per_device_train_batch_size 16 \ --per_device_eval_batch_size 16 \ --learning_rate 1e-6 \ --weight_decay 0.0 \ --num_train_epochs 1 \ --gradient_accumulation_steps 1 \ --num_warmup_steps 100 \ --output_dir "/opt/ml/model" \ --seed 25 \ --logging_steps 100 \ --report_name "blenderbot_400M_finetuning"
notebooks/sagemaker/22_accelerate_sagemaker_examples/src/seq2seq/launch.sh/0
{ "file_path": "notebooks/sagemaker/22_accelerate_sagemaker_examples/src/seq2seq/launch.sh", "repo_id": "notebooks", "token_count": 355 }
172
from transformers import AutoModelForCausalLM, AutoTokenizer import torch def model_fn(model_dir): # load model and processor from model_dir model = AutoModelForCausalLM.from_pretrained(model_dir, device_map="auto", load_in_8bit=True) tokenizer = AutoTokenizer.from_pretrained(model_dir) return model, tokenizer def predict_fn(data, model_and_tokenizer): # unpack model and tokenizer model, tokenizer = model_and_tokenizer # process input inputs = data.pop("inputs", data) parameters = data.pop("parameters", None) # preprocess input_ids = tokenizer(inputs, return_tensors="pt").input_ids.to(model.device) # pass inputs with all kwargs in data if parameters is not None: outputs = model.generate(input_ids, **parameters) else: outputs = model.generate(input_ids) # postprocess the prediction prediction = tokenizer.decode(outputs[0], skip_special_tokens=True) return [{"generated_text": prediction}]
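# --- Hedged local smoke test (not part of the original handler). ---------------
# On SageMaker, the Hugging Face inference toolkit calls model_fn/predict_fn for
# you; this block only illustrates the expected payload shape. It assumes the
# fine-tuned model was extracted to ./model (an illustrative path) and that a GPU
# with bitsandbytes support is available for load_in_8bit=True.
if __name__ == "__main__":
    loaded = model_fn("./model")
    payload = {
        "inputs": "What can I use Amazon SageMaker for?",
        "parameters": {"max_new_tokens": 64, "do_sample": True, "top_p": 0.9},
    }
    print(predict_fn(payload, loaded))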
notebooks/sagemaker/24_train_bloom_peft_lora/scripts/inference.py/0
{ "file_path": "notebooks/sagemaker/24_train_bloom_peft_lora/scripts/inference.py", "repo_id": "notebooks", "token_count": 348 }
173
<jupyter_start><jupyter_text>Evaluate LLMs with Hugging Face Lighteval on Amazon SageMakerIn this SageMaker example, we are going to learn how to evaluate LLMs using Hugging Face [lighteval](https://github.com/huggingface/lighteval/tree/main). LightEval is a lightweight LLM evaluation suite that powers the [Hugging Face Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). Evaluating LLMs is crucial for understanding their capabilities and limitations, yet it poses significant challenges due to their complex and opaque nature. LightEval facilitates this evaluation process by enabling LLMs to be assessed on academic benchmarks like MMLU or IFEval, providing a structured approach to gauge their performance across diverse tasks.In detail, you will learn how to:1. Setup Development Environment2. Prepare the evaluation configuration3. Evaluate Zephyr 7B on TruthfulQA on Amazon SageMaker<jupyter_code>!pip install sagemaker --upgrade --quiet<jupyter_output><empty_output><jupyter_text>If you are going to use SageMaker in a local environment, you need access to an IAM Role with the required permissions for SageMaker. You can find more about it [here](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html).<jupyter_code>import sagemaker import boto3 sess = sagemaker.Session() # sagemaker session bucket -> used for uploading data, models and logs # sagemaker will automatically create this bucket if it does not exist sagemaker_session_bucket=None if sagemaker_session_bucket is None and sess is not None: # set to default bucket if a bucket name is not given sagemaker_session_bucket = sess.default_bucket() try: role = sagemaker.get_execution_role() except ValueError: iam = boto3.client('iam') role = iam.get_role(RoleName='sagemaker_execution_role')['Role']['Arn'] sess = sagemaker.Session(default_bucket=sagemaker_session_bucket) print(f"sagemaker role arn: {role}") print(f"sagemaker bucket: {sess.default_bucket()}") print(f"sagemaker session region: {sess.boto_region_name}")<jupyter_output><empty_output><jupyter_text>2. Prepare the evaluation configuration[LightEval](https://github.com/huggingface/lighteval/tree/main) includes scripts to evaluate LLMs on common benchmarks like MMLU, TruthfulQA, IFEval, and more. It is used to evaluate models on the [Hugging Face Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). lighteval is built on top of the great [Eleuther AI Harness](https://github.com/EleutherAI/lm-evaluation-harness) with some additional features and improvements. You can find all available benchmarks [here](https://github.com/huggingface/lighteval/blob/main/tasks_examples/all_tasks.txt). We are going to use Amazon SageMaker Managed Training to evaluate the model. Therefore we will leverage the script available in [lighteval](https://github.com/huggingface/lighteval/blob/main/run_evals_accelerate.py). The Hugging Face DLC does not have lighteval installed. 
This means we need to provide a `requirements.txt` file to install the required dependencies.First, let's load the `run_evals_accelerate.py` script and create a `requirements.txt` file with the required dependencies.<jupyter_code>import os import requests as r lighteval_version = "0.2.0" # create the scripts directory if it does not exist os.makedirs("scripts", exist_ok=True) # load custom scripts from git raw_github_url = f"https://raw.githubusercontent.com/huggingface/lighteval/v{lighteval_version}/run_evals_accelerate.py" res = r.get(raw_github_url) with open("scripts/run_evals_accelerate.py", "w") as f: f.write(res.text) # write requirements.txt with open("scripts/requirements.txt", "w") as f: f.write(f"lighteval=={lighteval_version}")<jupyter_output><empty_output><jupyter_text>In lighteval, the evaluation is done by running the `run_evals_accelerate.py` script. The script takes a `task` argument which is defined as `suite|task|num_few_shot|{0 or 1 to automatically reduce num_few_shot if prompt is too long}`. Alternatively, you can also provide a path to a txt file with the tasks you want to evaluate the model on, which is what we are going to do. This makes it easier for you to extend the evaluation to other benchmarks.We are going to evaluate the model on the TruthfulQA benchmark with 0 few-shot examples. [TruthfulQA](https://paperswithcode.com/dataset/truthfulqa) is a benchmark designed to measure whether a language model generates truthful answers to questions, encompassing 817 questions across 38 categories including health, law, finance, and politics.<jupyter_code>with open("scripts/tasks.txt", "w") as f: f.write(f"lighteval|truthfulqa:mc|0|0")<jupyter_output><empty_output><jupyter_text>To evaluate a model on all the benchmarks of the Open LLM Leaderboard you can copy this [file](https://github.com/huggingface/lighteval/blob/v0.2.0/tasks_examples/open_llm_leaderboard_tasks.txt). 3. Evaluate Zephyr 7B on TruthfulQA on Amazon SageMakerIn this example we are going to evaluate the [HuggingFaceH4/zephyr-7b-beta](https://huggingface.co/HuggingFaceH4/zephyr-7b-beta) on the TruthfulQA benchmark, which is part of the Open LLM Leaderboard. In addition to the `task` argument we need to define: * `model_args`: Hugging Face Model ID or path, defined as `pretrained=HuggingFaceH4/zephyr-7b-beta`* `model_dtype`: The model data type, defined as `bfloat16`, `float16` or `float32`* `output_dir`: The directory where the evaluation results will be saved, e.g. `/opt/ml/model` Lighteval can also evaluate PEFT models or use `chat_templates`; you can find more about it [here](https://github.com/huggingface/lighteval/blob/v0.2.0/run_evals_accelerate.py).<jupyter_code>from sagemaker.huggingface import HuggingFace # hyperparameters, which are passed into the training job hyperparameters = { 'model_args': "pretrained=HuggingFaceH4/zephyr-7b-beta", # Hugging Face Model ID 'task': 'tasks.txt', # 'lighteval|truthfulqa:mc|0|0', 'model_dtype': 'bfloat16', # Torch dtype to load model weights 'output_dir': '/opt/ml/model' # Directory, which sagemaker uploads to s3 after training } # create the Estimator huggingface_estimator = HuggingFace( entry_point = 'run_evals_accelerate.py', # train script source_dir = 'scripts', # directory which includes all the files needed for training instance_type = 'ml.g5.4xlarge', # instances type used for the training job instance_count = 1, # the number of instances used for training base_job_name = "lighteval", # the name of the training job role = role, # IAM role used in training job to access AWS resources, e.g. 
S3 volume_size = 300, # the size of the EBS volume in GB transformers_version = '4.36', # the transformers version used in the training job pytorch_version = '2.1', # the pytorch_version version used in the training job py_version = 'py310', # the python version used in the training job hyperparameters = hyperparameters, environment = { "HUGGINGFACE_HUB_CACHE": "/tmp/.cache", # "HF_TOKEN": "REPALCE_WITH_YOUR_TOKEN" # needed for private models }, # set env variable to cache models in /tmp )<jupyter_output><empty_output><jupyter_text>We can now start our evaluation job, with the `.fit()`.<jupyter_code># starting the train job with our uploaded datasets as input huggingface_estimator.fit()<jupyter_output><empty_output><jupyter_text>After the evaluation job is finished, we can download the evaluation results from the S3 bucket. Lighteval will save the results and generations in the `output_dir`. The results are savedas json and include detailed information about each task and the model's performance. The results are available in the `results` key.<jupyter_code>import tarfile import json import io import os from sagemaker.s3 import S3Downloader # download results from s3 results_tar = S3Downloader.read_bytes(huggingface_estimator.model_data) model_id = hyperparameters["model_args"].split("=")[1] result={} # Use tarfile to open the tar content directly from bytes with tarfile.open(fileobj=io.BytesIO(results_tar), mode="r:gz") as tar: # Iterate over items in tar archive to find your json file by its path for member in tar.getmembers(): # get path of results based on model id used to evaluate if os.path.join("details", model_id) in member.name and member.name.endswith('.json'): # Extract the file content f = tar.extractfile(member) if f is not None: content = f.read() result = json.loads(content) break # print results print(result["results"]) # {'lighteval|truthfulqa:mc|0': {'truthfulqa_mc1': 0.40636474908200737, 'truthfulqa_mc1_stderr': 0.017193835812093897, 'truthfulqa_mc2': 0.5747003398184238, 'truthfulqa_mc2_stderr': 0.015742356478301463}}<jupyter_output><empty_output>
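If you want to report the scores somewhere other than a raw dictionary, a small post-processing step can flatten them into a table. The sketch below is an editorial addition, not part of the original notebook; it reuses the `result` dictionary parsed in the previous cell and only assumes that `pandas` is installed.

```python
import pandas as pd

# flatten {task: {metric: value}} into one row per (task, metric) pair
rows = [
    {"task": task_name, "metric": metric_name, "value": value}
    for task_name, task_metrics in result.get("results", {}).items()
    for metric_name, value in task_metrics.items()
]

scores = pd.DataFrame(rows)
print(scores.to_string(index=False))
# optionally persist the table next to the raw results
scores.to_csv("lighteval_results.csv", index=False)
```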
notebooks/sagemaker/30_evaluate_llms_with_lighteval/sagemaker-notebook.ipynb/0
{ "file_path": "notebooks/sagemaker/30_evaluate_llms_with_lighteval/sagemaker-notebook.ipynb", "repo_id": "notebooks", "token_count": 3115 }
174
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Contribute to PEFT We are happy to accept contributions to PEFT. If you plan to contribute, please read this to make the process as smooth as possible. ## Installation For code contributions to PEFT, you should choose the ["source"](../install#source) installation method. If you are new to creating a pull request, follow the [Creating a pull request](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request) guide by GitHub. ## Tests and code quality checks Regardless of the contribution type (unless it’s only about the docs), you should run tests and code quality checks before creating a PR to ensure your contribution doesn’t break anything and follows the project standards. We provide a Makefile to execute the necessary tests. Run the code below for the unit test: ```sh make test ``` Run one of the following to either only check or check and fix code quality and style: ```sh make quality # just check make style # check and fix ``` You can also set up [`pre-commit`](https://pre-commit.com/) to run these fixes automatically as Git commit hooks. ```bash $ pip install pre-commit $ pre-commit install ``` Running all the tests can take a couple of minutes, so during development it can be more efficient to only run tests specific to your change: ```sh pytest tests/ -k <name-of-test> ``` This should finish much quicker and allow for faster iteration. However, you should still run the whole test suite before creating a PR because your change can inadvertently break tests that at first glance are unrelated. If your change is specific to a hardware setting (e.g., it requires CUDA), take a look at [tests/test_gpu_examples.py](https://github.com/huggingface/peft/blob/1c1c7fdaa6e6abaa53939b865dee1eded82ad032/tests/test_gpu_examples.py) and [tests/test_common_gpu.py](https://github.com/huggingface/peft/blob/1c1c7fdaa6e6abaa53939b865dee1eded82ad032/tests/test_common_gpu.py) to see if it makes sense to add tests there. If your change could have an effect on saving and loading models, please run the tests with the `--regression` flag to trigger regression tests. It can happen that while you’re working on your PR, the underlying code base changes due to other changes being merged. If that happens – especially when there is a merge conflict – please update your branch with the latest changes. This can be a merge or a rebase, and we'll squash and merge the PR once it’s ready. ## PR description When opening a PR, please provide a nice description of the change you're proposing. If it relates to other issues or PRs, please reference them. 
Providing a good description not only helps the reviewers review your code better and faster, it can also be used later (as a basis) for the commit message which helps with long term maintenance of the project. If your code makes some non-trivial changes, it may also be a good idea to add comments to the code to explain those changes. For example, if you had to iterate on your implementation multiple times because the most obvious way didn’t work, it’s a good indication that a code comment is needed. ## Bugfixes Please give a description of the circumstances that led to the bug. If there is an existing issue, please link to it (e.g., “Resolves #12345”). Ideally when a bugfix is provided, it should be accompanied by a test for the bug. The test should fail with the current code and pass with the bugfix. Add a comment to the test that references the issue or PR. Without a test, it is more difficult to prevent regressions in the future. ## Add a new fine-tuning method New parameter-efficient fine-tuning methods are developed all the time. If you would like to add a new and promising method to PEFT, please follow these steps. 1. Before you start to implement the new method, please open a GitHub issue with your proposal. This way, the maintainers can give you some early feedback. 2. Please add a link to the source (usually a paper) of the method. Some evidence should be provided there is general interest in using the method. We will not add new methods that are freshly published, but there is no evidence of demand for it. 3. When implementing the method, it makes sense to look for existing implementations that already exist as a guide. Moreover, when you structure your code, please take inspiration from the other PEFT methods. For example, if your method is similar to LoRA, it makes sense to structure your code similarly or even reuse some functions or classes where it makes sense (some code duplication is okay, but don’t overdo it). 4. Ideally, in addition to the implementation of the new method, there should also be examples (notebooks, scripts), documentation, and an extensive test suite that proves the method works with a variety of tasks. However, this can be more challenging so it is acceptable to only provide the implementation and at least one working example. Documentation and tests can be added in follow up PRs. 5. Once you have something that seems to be working, don’t hesitate to create a draft PR even if it’s not in a mergeable state yet. The maintainers are happy to give you feedback and guidance along the way. ## Add other features It is best if you first open an issue on GitHub with a proposal to add the new feature. This way, you can discuss with the maintainers if it makes sense to add the feature before spending too much time on implementing it. New features should generally be accompanied by tests and documentation or examples. Without the latter, users will have a hard time discovering your cool new feature. Changes to the code should be implemented in a backward-compatible way. For example, existing code should continue to work the same way after the feature is merged.
peft/docs/source/developer_guides/contributing.md/0
{ "file_path": "peft/docs/source/developer_guides/contributing.md", "repo_id": "peft", "token_count": 1620 }
175
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Prompt-based methods A prompt can describe a task or provide an example of a task you want the model to learn. Instead of manually creating these prompts, soft prompting methods add learnable parameters to the input embeddings that can be optimized for a specific task while keeping the pretrained model's parameters frozen. This makes it both faster and easier to finetune large language models (LLMs) for new downstream tasks. The PEFT library supports several types of prompting methods (p-tuning, prefix tuning, prompt tuning) and you can learn more about how these methods work conceptually in the [Soft prompts](../conceptual_guides/prompting) guide. If you're interested in applying these methods to other tasks and use cases, take a look at our [notebook collection](https://huggingface.co/spaces/PEFT/soft-prompting)! This guide will show you how to train a causal language model - with a soft prompting method - to *generate a classification* for whether a tweet is a complaint or not. <Tip> Some familiarity with the general process of training a causal language model would be really helpful and allow you to focus on the soft prompting methods. If you're new, we recommend taking a look at the [Causal language modeling](https://huggingface.co/docs/transformers/tasks/language_modeling) guide first from the Transformers documentation. When you're ready, come back and see how easy it is to drop PEFT in to your training! </Tip> Before you begin, make sure you have all the necessary libraries installed. ```bash pip install -q peft transformers datasets ``` ## Dataset For this guide, you'll use the `twitter_complaints` subset of the [RAFT](https://huggingface.co/datasets/ought/raft) dataset. The `twitter_complaints` subset contains tweets labeled as `complaint` and `no complaint` and you can check out the [dataset viewer](https://huggingface.co/datasets/ought/raft/viewer/twitter_complaints) for a better idea of what the data looks like. Use the [`~datasets.load_dataset`] function to load the dataset and create a new `text_label` column so it is easier to understand what the `Label` values, `1` and `2` mean. ```py from datasets import load_dataset ds = load_dataset("ought/raft", "twitter_complaints") classes = [k.replace("_", " ") for k in ds["train"].features["Label"].names] ds = ds.map( lambda x: {"text_label": [classes[label] for label in x["Label"]]}, batched=True, num_proc=1, ) ds["train"][0] {"Tweet text": "@HMRCcustomers No this is my first job", "ID": 0, "Label": 2, "text_label": "no complaint"} ``` Load a tokenizer, define the padding token to use, and determine the maximum length of the tokenized label. 
```py from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("bigscience/bloomz-560m") if tokenizer.pad_token_id is None: tokenizer.pad_token_id = tokenizer.eos_token_id target_max_length = max([len(tokenizer(class_label)["input_ids"]) for class_label in classes]) print(target_max_length) ``` Create a preprocessing function that tokenizes the tweet text and labels, pad the inputs and labels in each batch, create an attention mask, and truncate sequences to the `max_length`. Then convert the `input_ids`, `attention_mask`, and `labels` to PyTorch tensors. ```py import torch max_length = 64 def preprocess_function(examples, text_column="Tweet text", label_column="text_label"): batch_size = len(examples[text_column]) inputs = [f"{text_column} : {x} Label : " for x in examples[text_column]] targets = [str(x) for x in examples[label_column]] model_inputs = tokenizer(inputs) labels = tokenizer(targets) classes = [k.replace("_", " ") for k in ds["train"].features["Label"].names] for i in range(batch_size): sample_input_ids = model_inputs["input_ids"][i] label_input_ids = labels["input_ids"][i] model_inputs["input_ids"][i] = [tokenizer.pad_token_id] * ( max_length - len(sample_input_ids) ) + sample_input_ids model_inputs["attention_mask"][i] = [0] * (max_length - len(sample_input_ids)) + model_inputs[ "attention_mask" ][i] labels["input_ids"][i] = [-100] * (max_length - len(sample_input_ids)) + label_input_ids model_inputs["input_ids"][i] = torch.tensor(model_inputs["input_ids"][i][:max_length]) model_inputs["attention_mask"][i] = torch.tensor(model_inputs["attention_mask"][i][:max_length]) labels["input_ids"][i] = torch.tensor(labels["input_ids"][i][:max_length]) model_inputs["labels"] = labels["input_ids"] return model_inputs ``` Apply the preprocessing function to the entire dataset with the [`~datasets.Dataset.map`] function, and remove the unprocessed columns because the model won't need them. ```py processed_ds = ds.map( preprocess_function, batched=True, num_proc=1, remove_columns=ds["train"].column_names, load_from_cache_file=False, desc="Running tokenizer on dataset", ) ``` Finally, create a training and evaluation [`DataLoader`](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader). You can set `pin_memory=True` to speed up the data transfer to the GPU during training if the samples in your dataset are on a CPU. ```py from torch.utils.data import DataLoader from transformers import default_data_collator train_ds = processed_ds["train"] eval_ds = processed_ds["test"] batch_size = 16 train_dataloader = DataLoader(train_ds, shuffle=True, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True) eval_dataloader = DataLoader(eval_ds, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True) ``` ## Model Now let's load a pretrained model to use as the base model for the soft prompt method. This guide uses the [bigscience/bloomz-560m](https://huggingface.co/bigscience/bloomz-560m) model, but you can use any causal language model you want. ```py from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained("bigscience/bloomz-560m") ``` ### PEFT configuration and model For any PEFT method, you'll need to create a configuration which contains all the parameters that specify how the PEFT method should be applied. Once the configuration is setup, pass it to the [`~peft.get_peft_model`] function along with the base model to create a trainable [`PeftModel`]. 
<Tip> Call the [`~PeftModel.print_trainable_parameters`] method to compare the number of trainable parameters of [`PeftModel`] versus the number of parameters in the base model! </Tip> <hfoptions id="configurations"> <hfoption id="p-tuning"> [P-tuning](../conceptual_guides/prompting#p-tuning) adds a trainable embedding tensor where the prompt tokens can be added anywhere in the input sequence. Create a [`PromptEncoderConfig`] with the task type, the number of virtual tokens to add and learn, and the hidden size of the encoder for learning the prompt parameters. ```py from peft import PromptEncoderConfig, get_peft_model peft_config = PromptEncoderConfig(task_type="CAUSAL_LM", num_virtual_tokens=20, encoder_hidden_size=128) model = get_peft_model(model, peft_config) model.print_trainable_parameters() "trainable params: 300,288 || all params: 559,514,880 || trainable%: 0.05366935013417338" ``` </hfoption> <hfoption id="prefix tuning"> [Prefix tuning](../conceptual_guides/prompting#prefix-tuning) adds task-specific parameters in all of the model layers, which are optimized by a separate feed-forward network. Create a [`PrefixTuningConfig`] with the task type and number of virtual tokens to add and learn. ```py from peft import PrefixTuningConfig, get_peft_model peft_config = PrefixTuningConfig(task_type="CAUSAL_LM", num_virtual_tokens=20) model = get_peft_model(model, peft_config) model.print_trainable_parameters() "trainable params: 983,040 || all params: 560,197,632 || trainable%: 0.1754809274167014" ``` </hfoption> <hfoption id="prompt tuning"> [Prompt tuning](../conceptual_guides/prompting#prompt-tuning) formulates all tasks as a *generation* task and it adds a task-specific prompt to the input which is updated independently. The `prompt_tuning_init_text` parameter specifies how to finetune the model (in this case, it is classifying whether tweets are complaints or not). For the best results, the `prompt_tuning_init_text` should have the same number of tokens that should be predicted. To do this, you can set `num_virtual_tokens` to the number of tokens of the `prompt_tuning_init_text`. Create a [`PromptTuningConfig`] with the task type, the initial prompt tuning text to train the model with, the number of virtual tokens to add and learn, and a tokenizer. ```py from peft import PromptTuningConfig, PromptTuningInit, get_peft_model prompt_tuning_init_text = "Classify if the tweet is a complaint or no complaint.\n" peft_config = PromptTuningConfig( task_type="CAUSAL_LM", prompt_tuning_init=PromptTuningInit.TEXT, num_virtual_tokens=len(tokenizer(prompt_tuning_init_text)["input_ids"]), prompt_tuning_init_text=prompt_tuning_init_text, tokenizer_name_or_path="bigscience/bloomz-560m", ) model = get_peft_model(model, peft_config) model.print_trainable_parameters() "trainable params: 8,192 || all params: 559,222,784 || trainable%: 0.0014648902430985358" ``` </hfoption> </hfoptions> ### Training Set up an optimizer and learning rate scheduler. ```py from transformers import get_linear_schedule_with_warmup lr = 3e-2 num_epochs = 50 optimizer = torch.optim.AdamW(model.parameters(), lr=lr) lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=0, num_training_steps=(len(train_dataloader) * num_epochs), ) ``` Move the model to the GPU and create a training loop that reports the loss and perplexity for each epoch. 
```py from tqdm import tqdm device = "cuda" model = model.to(device) for epoch in range(num_epochs): model.train() total_loss = 0 for step, batch in enumerate(tqdm(train_dataloader)): batch = {k: v.to(device) for k, v in batch.items()} outputs = model(**batch) loss = outputs.loss total_loss += loss.detach().float() loss.backward() optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() eval_loss = 0 eval_preds = [] for step, batch in enumerate(tqdm(eval_dataloader)): batch = {k: v.to(device) for k, v in batch.items()} with torch.no_grad(): outputs = model(**batch) loss = outputs.loss eval_loss += loss.detach().float() eval_preds.extend( tokenizer.batch_decode(torch.argmax(outputs.logits, -1).detach().cpu().numpy(), skip_special_tokens=True) ) eval_epoch_loss = eval_loss / len(eval_dataloader) eval_ppl = torch.exp(eval_epoch_loss) train_epoch_loss = total_loss / len(train_dataloader) train_ppl = torch.exp(train_epoch_loss) print(f"{epoch=}: {train_ppl=} {train_epoch_loss=} {eval_ppl=} {eval_epoch_loss=}") ``` ## Share your model Once training is complete, you can upload your model to the Hub with the [`~transformers.PreTrainedModel.push_to_hub`] method. You'll need to login to your Hugging Face account first and enter your token when prompted. ```py from huggingface_hub import notebook_login account = <your-hf-account-name> peft_model_id = f"{account}/bloomz-560-m-peft-method" model.push_to_hub(peft_model_id) ``` If you check the model file size in the repository, you’ll see that it is a lot smaller than a full sized model! <div class="flex flex-col justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/PEFT-hub-screenshot.png"/> <figcaption class="text-center">For example, the adapter weights for a opt-350m model stored on the Hub are only ~6MB compared to the full model size which can be ~700MB.</figcaption> </div> ## Inference Let's load the model for inference and test it out on a tweet! ```py from peft import AutoPeftModelForCausalLM model = AutoPeftModelForCausalLM.from_pretrained("peft_model_id").to("cuda") tokenizer = AutoTokenizer.from_pretrained("bigscience/bloomz-560m") i = 15 inputs = tokenizer(f'{text_column} : {ds["test"][i]["Tweet text"]} Label : ', return_tensors="pt") print(ds["test"][i]["Tweet text"]) "@NYTsupport i have complained a dozen times &amp; yet my papers are still thrown FAR from my door. Why is this so hard to resolve?" ``` Call the [`~transformers.GenerationMixin.generate`] method to generate the predicted classification label. ```py with torch.no_grad(): inputs = {k: v.to(device) for k, v in inputs.items()} outputs = model.generate(input_ids=inputs["input_ids"], max_new_tokens=10) print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True)) "['Tweet text : @NYTsupport i have complained a dozen times &amp; yet my papers are still thrown FAR from my door. Why is this so hard to resolve? Label : complaint']" ```
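To go beyond a single example, you can loop over the labeled `train` split and count how often the text generated after `Label : ` matches the reference. This is an editorial sketch rather than part of the official guide; it reuses the `ds`, `tokenizer`, `model`, and `device` objects defined above and uses a simple prefix match to extract the predicted label.

```py
correct = 0
examples = ds["train"]
for example in examples:
    prompt = f"Tweet text : {example['Tweet text']} Label : "
    inputs = {k: v.to(device) for k, v in tokenizer(prompt, return_tensors="pt").items()}
    with torch.no_grad():
        outputs = model.generate(input_ids=inputs["input_ids"], max_new_tokens=10)
    decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
    prediction = decoded.split("Label : ")[-1].strip()
    correct += int(prediction.startswith(example["text_label"]))
print(f"accuracy on the labeled train split: {correct / len(examples):.2%}")
```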
peft/docs/source/task_guides/prompt_based_methods.md/0
{ "file_path": "peft/docs/source/task_guides/prompt_based_methods.md", "repo_id": "peft", "token_count": 4607 }
176
import gc import os import sys import threading import psutil import torch from accelerate import Accelerator from datasets import load_dataset from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from peft import LoraConfig, TaskType, get_peft_model def levenshtein_distance(str1, str2): # TC: O(N^2) # SC: O(N) if str1 == str2: return 0 num_rows = len(str1) + 1 num_cols = len(str2) + 1 dp_matrix = list(range(num_cols)) for i in range(1, num_rows): prev = dp_matrix[0] dp_matrix[0] = i for j in range(1, num_cols): temp = dp_matrix[j] if str1[i - 1] == str2[j - 1]: dp_matrix[j] = prev else: dp_matrix[j] = min(prev, dp_matrix[j], dp_matrix[j - 1]) + 1 prev = temp return dp_matrix[num_cols - 1] def get_closest_label(eval_pred, classes): min_id = sys.maxsize min_edit_distance = sys.maxsize for i, class_label in enumerate(classes): edit_distance = levenshtein_distance(eval_pred.strip(), class_label) if edit_distance < min_edit_distance: min_id = i min_edit_distance = edit_distance return classes[min_id] # Converting Bytes to Megabytes def b2mb(x): return int(x / 2**20) # This context manager is used to track the peak memory usage of the process class TorchTracemalloc: def __enter__(self): gc.collect() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero self.begin = torch.cuda.memory_allocated() self.process = psutil.Process() self.cpu_begin = self.cpu_mem_used() self.peak_monitoring = True peak_monitor_thread = threading.Thread(target=self.peak_monitor_func) peak_monitor_thread.daemon = True peak_monitor_thread.start() return self def cpu_mem_used(self): """get resident set size memory for the current process""" return self.process.memory_info().rss def peak_monitor_func(self): self.cpu_peak = -1 while True: self.cpu_peak = max(self.cpu_mem_used(), self.cpu_peak) # can't sleep or will not catch the peak right (this comment is here on purpose) # time.sleep(0.001) # 1msec if not self.peak_monitoring: break def __exit__(self, *exc): self.peak_monitoring = False gc.collect() torch.cuda.empty_cache() self.end = torch.cuda.memory_allocated() self.peak = torch.cuda.max_memory_allocated() self.used = b2mb(self.end - self.begin) self.peaked = b2mb(self.peak - self.begin) self.cpu_end = self.cpu_mem_used() self.cpu_used = b2mb(self.cpu_end - self.cpu_begin) self.cpu_peaked = b2mb(self.cpu_peak - self.cpu_begin) # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") def main(): accelerator = Accelerator() # model_name_or_path = "bigscience/T0_3B" model_name_or_path = "facebook/bart-large" dataset_name = "twitter_complaints" peft_config = LoraConfig( task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1 ) text_column = "Tweet text" label_column = "text_label" lr = 3e-3 num_epochs = 5 batch_size = 8 seed = 42 do_test = False set_seed(seed) dataset = load_dataset("ought/raft", dataset_name) classes = [k.replace("_", " ") for k in dataset["train"].features["Label"].names] dataset = dataset.map( lambda x: {"text_label": [classes[label] for label in x["Label"]]}, batched=True, num_proc=1, ) tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) target_max_length = max([len(tokenizer(class_label)["input_ids"]) for class_label in classes]) def preprocess_function(examples): inputs = examples[text_column] targets = examples[label_column] model_inputs = tokenizer(inputs, truncation=True) labels = tokenizer( 
targets, max_length=target_max_length, padding="max_length", truncation=True, return_tensors="pt" ) labels = labels["input_ids"] labels[labels == tokenizer.pad_token_id] = -100 model_inputs["labels"] = labels return model_inputs with accelerator.main_process_first(): processed_datasets = dataset.map( preprocess_function, batched=True, num_proc=1, remove_columns=dataset["train"].column_names, load_from_cache_file=True, desc="Running tokenizer on dataset", ) accelerator.wait_for_everyone() train_dataset = processed_datasets["train"] eval_dataset = processed_datasets["train"] test_dataset = processed_datasets["test"] def collate_fn(examples): return tokenizer.pad(examples, padding="longest", return_tensors="pt") train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=collate_fn, batch_size=batch_size, pin_memory=True ) eval_dataloader = DataLoader(eval_dataset, collate_fn=collate_fn, batch_size=batch_size, pin_memory=True) test_dataloader = DataLoader(test_dataset, collate_fn=collate_fn, batch_size=batch_size, pin_memory=True) # creating model model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path) model = get_peft_model(model, peft_config) model.print_trainable_parameters() # optimizer optimizer = torch.optim.AdamW(model.parameters(), lr=lr) # lr scheduler lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=0, num_training_steps=(len(train_dataloader) * num_epochs), ) model, train_dataloader, eval_dataloader, test_dataloader, optimizer, lr_scheduler = accelerator.prepare( model, train_dataloader, eval_dataloader, test_dataloader, optimizer, lr_scheduler ) accelerator.print(model) is_ds_zero_3 = False if getattr(accelerator.state, "deepspeed_plugin", None): is_ds_zero_3 = accelerator.state.deepspeed_plugin.zero_stage == 3 for epoch in range(num_epochs): with TorchTracemalloc() as tracemalloc: model.train() total_loss = 0 for step, batch in enumerate(tqdm(train_dataloader)): outputs = model(**batch) loss = outputs.loss total_loss += loss.detach().float() accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print(f"GPU Memory before entering the train : {b2mb(tracemalloc.begin)}") accelerator.print(f"GPU Memory consumed at the end of the train (end-begin): {tracemalloc.used}") accelerator.print(f"GPU Peak Memory consumed during the train (max-begin): {tracemalloc.peaked}") accelerator.print( f"GPU Total Peak Memory consumed during the train (max): {tracemalloc.peaked + b2mb(tracemalloc.begin)}" ) accelerator.print(f"CPU Memory before entering the train : {b2mb(tracemalloc.cpu_begin)}") accelerator.print(f"CPU Memory consumed at the end of the train (end-begin): {tracemalloc.cpu_used}") accelerator.print(f"CPU Peak Memory consumed during the train (max-begin): {tracemalloc.cpu_peaked}") accelerator.print( f"CPU Total Peak Memory consumed during the train (max): {tracemalloc.cpu_peaked + b2mb(tracemalloc.cpu_begin)}" ) train_epoch_loss = total_loss / len(train_dataloader) train_ppl = torch.exp(train_epoch_loss) accelerator.print(f"{epoch=}: {train_ppl=} {train_epoch_loss=}") model.eval() eval_preds = [] with TorchTracemalloc() as tracemalloc: for _, batch in enumerate(tqdm(eval_dataloader)): batch = {k: v for k, v in batch.items() if k != "labels"} with torch.no_grad(): outputs = accelerator.unwrap_model(model).generate( **batch, synced_gpus=is_ds_zero_3 ) # synced_gpus=True for DS-stage 3 
outputs = accelerator.pad_across_processes(outputs, dim=1, pad_index=tokenizer.pad_token_id) preds = accelerator.gather_for_metrics(outputs).detach().cpu().numpy() eval_preds.extend(tokenizer.batch_decode(preds, skip_special_tokens=True)) # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print(f"GPU Memory before entering the eval : {b2mb(tracemalloc.begin)}") accelerator.print(f"GPU Memory consumed at the end of the eval (end-begin): {tracemalloc.used}") accelerator.print(f"GPU Peak Memory consumed during the eval (max-begin): {tracemalloc.peaked}") accelerator.print( f"GPU Total Peak Memory consumed during the eval (max): {tracemalloc.peaked + b2mb(tracemalloc.begin)}" ) accelerator.print(f"CPU Memory before entering the eval : {b2mb(tracemalloc.cpu_begin)}") accelerator.print(f"CPU Memory consumed at the end of the eval (end-begin): {tracemalloc.cpu_used}") accelerator.print(f"CPU Peak Memory consumed during the eval (max-begin): {tracemalloc.cpu_peaked}") accelerator.print( f"CPU Total Peak Memory consumed during the eval (max): {tracemalloc.cpu_peaked + b2mb(tracemalloc.cpu_begin)}" ) correct = 0 total = 0 assert len(eval_preds) == len( dataset["train"][label_column] ), f"{len(eval_preds)} != {len(dataset['train'][label_column])}" for pred, true in zip(eval_preds, dataset["train"][label_column]): if pred.strip() == true.strip(): correct += 1 total += 1 accuracy = correct / total * 100 accelerator.print(f"{accuracy=}") accelerator.print(f"{eval_preds[:10]=}") accelerator.print(f"{dataset['train'][label_column][:10]=}") if do_test: model.eval() test_preds = [] for _, batch in enumerate(tqdm(test_dataloader)): batch = {k: v for k, v in batch.items() if k != "labels"} with torch.no_grad(): outputs = accelerator.unwrap_model(model).generate( **batch, synced_gpus=is_ds_zero_3 ) # synced_gpus=True for DS-stage 3 outputs = accelerator.pad_across_processes(outputs, dim=1, pad_index=tokenizer.pad_token_id) preds = accelerator.gather(outputs).detach().cpu().numpy() test_preds.extend(tokenizer.batch_decode(preds, skip_special_tokens=True)) test_preds_cleaned = [] for _, pred in enumerate(test_preds): test_preds_cleaned.append(get_closest_label(pred, classes)) test_df = dataset["test"].to_pandas() assert len(test_preds_cleaned) == len(test_df), f"{len(test_preds_cleaned)} != {len(test_df)}" test_df[label_column] = test_preds_cleaned test_df["text_labels_orig"] = test_preds accelerator.print(test_df[[text_column, label_column]].sample(20)) pred_df = test_df[["ID", label_column]] pred_df.columns = ["ID", "Label"] os.makedirs(f"data/{dataset_name}", exist_ok=True) pred_df.to_csv(f"data/{dataset_name}/predictions.csv", index=False) accelerator.wait_for_everyone() # Option1: Pushing the model to Hugging Face Hub # model.push_to_hub( # f"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace("/", "_"), # token = "hf_..." # ) # token (`bool` or `str`, *optional*): # `token` is to be used for HTTP Bearer authorization when accessing remote files. If `True`, will use the token generated # when running `huggingface-cli login` (stored in `~/.huggingface`). Will default to `True` if `repo_url` # is not specified. 
# Or you can get your token from https://huggingface.co/settings/token # Option2: Saving the model locally peft_model_id = f"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace( "/", "_" ) model.save_pretrained(peft_model_id) accelerator.wait_for_everyone() if __name__ == "__main__": main()
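# --- Hedged follow-up sketch (not part of the original script). ----------------
# Shows how the LoRA adapter saved above could be reloaded on top of the base
# model for inference. The adapter directory name mirrors the naming pattern used
# in main() ("{dataset}_{model}_{peft_type}_{task_type}"); adjust it if you change
# the model, dataset, or save location.
def load_trained_adapter_example(
    adapter_dir="twitter_complaints_facebook_bart-large_LORA_SEQ_2_SEQ_LM",
    base_model_name="facebook/bart-large",
):
    from peft import PeftModel

    base_model = AutoModelForSeq2SeqLM.from_pretrained(base_model_name)
    peft_model = PeftModel.from_pretrained(base_model, adapter_dir).eval()
    tokenizer = AutoTokenizer.from_pretrained(base_model_name)

    tweet = "@nationalgridus I have no water and the bill is current and paid. Can you do something about this?"
    inputs = tokenizer(tweet, return_tensors="pt")
    with torch.no_grad():
        outputs = peft_model.generate(**inputs, max_new_tokens=10)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)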
peft/examples/conditional_generation/peft_lora_seq2seq_accelerate_ds_zero3_offload.py/0
{ "file_path": "peft/examples/conditional_generation/peft_lora_seq2seq_accelerate_ds_zero3_offload.py", "repo_id": "peft", "token_count": 5607 }
177
import argparse import gc import json import logging import math import os from dataclasses import dataclass from datetime import datetime from pathlib import Path from random import randint from typing import Any, Dict, List, Union # datasets imports import datasets # metric imports import evaluate import numpy as np import torch import transformers import wandb # accelerate imports from accelerate import Accelerator, dispatch_model from accelerate.logging import get_logger from datasets import Audio, DatasetDict, IterableDatasetDict, interleave_datasets, load_dataset # hf imports from huggingface_hub import Repository from torch.utils.data import DataLoader from tqdm import tqdm from transformers import ( BitsAndBytesConfig, SchedulerType, WhisperForConditionalGeneration, WhisperProcessor, get_scheduler, set_seed, ) from transformers.models.whisper.english_normalizer import BasicTextNormalizer from transformers.utils import get_full_repo_name # peft imports from peft import AdaLoraConfig, LoraConfig, PeftModel, get_peft_model logger = get_logger(__name__, log_level="INFO") def parse_args(): parser = argparse.ArgumentParser(description="Whisper Fine-Tuning with AdaLora") parser.add_argument( "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=True, ) parser.add_argument("--language", type=str, help="Language to use for training; e.g., 'Hindi' ", required=True) parser.add_argument("--language_abbr", type=str, help="Language to use for training; e.g., 'hi' ", required=True) parser.add_argument( "--task", type=str, default="transcribe", help="Task to use for training; e.g., 'transcribe' ", required=False ) parser.add_argument( "--dataset_name", type=str, default="mozilla-foundation/common_voice_11_0", help="Dataset to use for training; e.g., 'whisper' ", required=False, ) parser.add_argument( "--dataset_in_streaming_mode", action="store_true", help="Whether to use streaming mode for the dataset.", ) parser.add_argument( "--do_lower_case", action="store_true", help="lowercase the transcribed text before tokenizing" ) parser.add_argument( "--do_remove_punctuation", action="store_true", help="remove punctuation from the transcribed text" ) parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--overwrite_cache", type=bool, default=False, help="Overwrite the cached training and evaluation sets" ) parser.add_argument("--max_audio_input_length", type=float, default=30.0, help="Maximum audio length in seconds.") parser.add_argument( "--preprocessing_num_workers", type=int, default=None, help="The number of processes to use for the preprocessing.", ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--buffer_size", type=int, default=5000, help="Number of samples to prefetch in the streaming mode.", ) parser.add_argument( "--dataloader_pin_memory", action="store_true", help="Whether or not to pin memory for the DataLoader.", ) parser.add_argument( "--dataloader_num_workers", type=int, default=0, help="Number of subprocesses to use for data loading.", ) parser.add_argument( "--learning_rate", type=float, default=5e-5, help="Initial learning rate (after the potential warmup period) to use.", ) 
parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="linear", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], ) parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--load_best_model", action="store_true", help="Whether to load the best model at the end of training", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to enable experiment trackers for logging.", ) parser.add_argument( "--report_to", type=str, default="all", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,' ' `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations.' "Only applicable when `--with_tracking` is passed." ), ) parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`." ) parser.add_argument( "--checkpointing_steps", type=int, default=500, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--logging_steps", type=int, default=100, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--evaluation_steps", type=int, default=500, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) # lora/adalora specific args parser.add_argument( "--use_peft", action="store_true", help="Whether to use PEFT", ) parser.add_argument( "--use_adalora", action="store_true", help="Whether to use AdaLoRA or LoRA. 
If set, uses AdaLoRA instead of the default LoRA.", ) parser.add_argument( "--init_r", type=int, default=12, help="Initial AdaLoRA rank", ) parser.add_argument( "--target_r", type=int, default=4, help="Target AdaLoRA rank", ) parser.add_argument( "--tinit", type=int, default=200, help="number of warmup steps for AdaLoRA wherein no pruning is performed", ) parser.add_argument( "--tfinal", type=int, default=1000, help=" fix the resulting budget distribution and fine-tune the model for tfinal steps when using AdaLoRA ", ) parser.add_argument( "--delta_t", type=int, default=10, help="interval of steps for AdaLoRA to update rank", ) parser.add_argument( "--lora_alpha", type=int, default=32, help="LORA alpha", ) parser.add_argument( "--r", type=int, default=8, help="LORA rank", ) parser.add_argument( "--lora_dropout", type=float, default=0.1, help="LORA dropout", ) parser.add_argument( "--orth_reg_weight", type=float, default=0.5, help="Orthogonal regularization weight", ) parser.add_argument( "--debug_mode", action="store_true", help="Whether to use debug mode", ) args = parser.parse_args() if args.push_to_hub: assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed." return args def load_streaming_dataset(dataset_name, dataset_config_name, split, **kwargs): if "+" in split: # load multiple splits separated by the `+` symbol *with* streaming mode dataset_splits = [ load_dataset(dataset_name, dataset_config_name, split=split_name, streaming=True, **kwargs) for split_name in split.split("+") ] # interleave multiple splits to form one dataset interleaved_dataset = interleave_datasets(dataset_splits) return interleaved_dataset else: # load a single split *with* streaming mode dataset = load_dataset(dataset_name, dataset_config_name, split=split, streaming=True, **kwargs) return dataset def prepare_dataset_wrapper(do_lower_case, do_remove_punctuation, processor, normalizer): def prepare_dataset(batch): # load and (possibly) resample audio data to 16kHz audio = batch["audio"] # compute log-Mel input features from input audio array batch["input_features"] = processor.feature_extractor( audio["array"], sampling_rate=audio["sampling_rate"] ).input_features[0] # compute input length of audio sample in seconds batch["input_length"] = len(audio["array"]) / audio["sampling_rate"] # optional pre-processing steps transcription = batch["sentence"] if do_lower_case: transcription = transcription.lower() if do_remove_punctuation: transcription = normalizer(transcription).strip() # encode target text to label ids batch["labels"] = processor.tokenizer(transcription).input_ids return batch return prepare_dataset def save_model_hook(models, weights, output_dir): for model in models: model.save_pretrained(output_dir) # make sure to pop weight so that corresponding model is not saved again weights.pop() def load_model_hook(models, input_dir): while len(models) > 0: model = models.pop() # pop models so that they are not loaded again PeftModel.from_pretrained(model.base_model.model, input_dir) @dataclass class DataCollatorSpeechSeq2SeqWithPadding: processor: Any def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: # split inputs and labels since they have to be of different lengths and need different padding methods # first treat the audio inputs by simply returning torch tensors input_features = [{"input_features": feature["input_features"]} for feature in features] batch = 
self.processor.feature_extractor.pad(input_features, return_tensors="pt") # get the tokenized label sequences label_features = [{"input_ids": feature["labels"]} for feature in features] # pad the labels to max length labels_batch = self.processor.tokenizer.pad(label_features, return_tensors="pt") # replace padding with -100 to ignore loss correctly labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100) # if bos token is appended in previous tokenization step, # cut bos token here as it's append later anyways if (labels[:, 0] == self.processor.tokenizer.bos_token_id).all().cpu().item(): labels = labels[:, 1:] batch["labels"] = labels return batch def get_audio_length_processor(max_input_length): def is_audio_in_length_range(length): return length < max_input_length return is_audio_in_length_range def evaluation_loop(model, eval_dataloader, processor, normalizer, metric, forced_decoder_ids, accelerator): model.eval() predictions = [] references = [] normalized_predictions = [] normalized_references = [] for _, batch in enumerate(tqdm(eval_dataloader)): with torch.cuda.amp.autocast(): with torch.no_grad(): generated_tokens = ( model.generate( input_features=batch["input_features"], forced_decoder_ids=forced_decoder_ids, max_new_tokens=255, ) .cpu() .numpy() ) labels = batch["labels"].cpu().numpy() labels = np.where(labels != -100, labels, processor.tokenizer.pad_token_id) decoded_preds = processor.tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) decoded_labels = processor.tokenizer.batch_decode(labels, skip_special_tokens=True) predictions.extend(decoded_preds) references.extend(decoded_labels) normalized_predictions.extend([normalizer(pred).strip() for pred in decoded_preds]) normalized_references.extend([normalizer(label).strip() for label in decoded_labels]) del generated_tokens, labels, batch gc.collect() wer = 100 * metric.compute(predictions=predictions, references=references) normalized_wer = 100 * metric.compute(predictions=normalized_predictions, references=normalized_references) eval_metrics = {"eval/wer": wer, "eval/normalized_wer": normalized_wer} if accelerator.get_tracker("wandb"): sample_size = min(len(predictions), 256) ids = [randint(0, len(predictions) - 1) for p in range(0, sample_size)] sample_predictions = [predictions[i] for i in ids] sample_references = [references[i] for i in ids] sample_normalized_predictions = [normalized_predictions[i] for i in ids] sample_normalized_references = [normalized_references[i] for i in ids] table_rows = [ list(r) for r in zip( sample_predictions, sample_references, sample_normalized_predictions, sample_normalized_references ) ] eval_metrics["eval_samples"] = wandb.Table( columns=["predictions", "references", "normalized_predictions", "normalized_references"], rows=table_rows, ) return eval_metrics def main(): args = parse_args() accelerator_kwargs = {"gradient_accumulation_steps": args.gradient_accumulation_steps} if args.with_tracking: accelerator_kwargs["log_with"] = args.report_to accelerator_kwargs["project_dir"] = args.output_dir accelerator = Accelerator(**accelerator_kwargs) # Make one log on every process with the configuration for debugging. 
logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.push_to_hub: if args.hub_model_id is None: repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token) else: repo_name = args.hub_model_id repo = Repository(args.output_dir, clone_from=repo_name) with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: if "step_*" not in gitignore: gitignore.write("step_*\n") if "epoch_*" not in gitignore: gitignore.write("epoch_*\n") elif args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) accelerator.wait_for_everyone() # load dataset either in streaming mode or not processor = WhisperProcessor.from_pretrained(args.model_name_or_path, language=args.language, task=args.task) normalizer = BasicTextNormalizer() prepare_dataset = prepare_dataset_wrapper(args.do_lower_case, args.do_remove_punctuation, processor, normalizer) is_audio_in_length_range = get_audio_length_processor(args.max_audio_input_length) data_collator = DataCollatorSpeechSeq2SeqWithPadding(processor=processor) if args.dataset_in_streaming_mode: raw_datasets = IterableDatasetDict() loading_method = load_streaming_dataset else: raw_datasets = DatasetDict() loading_method = load_dataset if args.debug_mode: train_split = "train[:100]" test_split = "test[:10]" else: train_split = "train+validation" test_split = "test" raw_datasets["train"] = loading_method( args.dataset_name, args.language_abbr, split=train_split, use_auth_token=True ) raw_datasets["test"] = loading_method(args.dataset_name, args.language_abbr, split=test_split, use_auth_token=True) raw_datasets = raw_datasets.cast_column("audio", Audio(sampling_rate=16000)) logger.info("Dataset loaded: %s", raw_datasets) logger.info(f'{raw_datasets["train"][0]}') vectorized_datasets = raw_datasets.map( prepare_dataset, remove_columns=list(next(iter(raw_datasets.values())).features), num_proc=args.preprocessing_num_workers, ).with_format("torch") if args.dataset_in_streaming_mode: vectorized_datasets["train"] = vectorized_datasets["train"].shuffle( buffer_size=args.buffer_size, seed=args.seed, ) # filter out audio files that are too long from the training set is_audio_in_length_range = get_audio_length_processor(args.max_audio_input_length) vectorized_datasets["train"] = vectorized_datasets["train"].filter( is_audio_in_length_range, input_columns=["input_length"] ) # get dataloaders train_dataloader = DataLoader( vectorized_datasets["train"], batch_size=args.per_device_train_batch_size, shuffle=True, collate_fn=data_collator, num_workers=args.dataloader_num_workers, pin_memory=args.dataloader_pin_memory, ) eval_dataloader = DataLoader( vectorized_datasets["test"], batch_size=args.per_device_eval_batch_size, collate_fn=data_collator, num_workers=args.dataloader_num_workers, pin_memory=args.dataloader_pin_memory, ) # metric metric = evaluate.load("wer") # model model = WhisperForConditionalGeneration.from_pretrained( args.model_name_or_path, quantization_config=BitsAndBytesConfig(load_in_8bit=True) ) 
model.config.forced_decoder_ids = None model.config.suppress_tokens = [] if len(set(model.hf_device_map.values()).intersection({"cpu", "disk"})) > 0: raise ValueError("Training on CPU or disk is not supported.") if len(set(model.hf_device_map.values())) > 1: device_map = model.hf_device_map.copy() # required because `labels` are on main execution device (0) while the output of `proj_out` is on other device. # So, this leads to device mismatch error when calculation cross-entropy between logits and labels. # Won't arise during inference as `labels` aren't supplied during that time # instead of changing device of one of the tied modules, I have to do this for all tied modules # else the execution device of remaining tied modules isn't changed device_map["model.decoder.embed_tokens"] = model._hf_hook.execution_device device_map["model.decoder.embed_positions"] = model._hf_hook.execution_device device_map["proj_out"] = model._hf_hook.execution_device dispatch_model(model, device_map=device_map) # preparing peft model if args.use_peft: from peft import prepare_model_for_int8_training model = prepare_model_for_int8_training(model) # as Whisper model uses Conv layer in encoder, checkpointing disables grad computation # to avoid this, make the inputs trainable def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.model.encoder.conv1.register_forward_hook(make_inputs_require_grad) # wrapping model with adalora tuner if args.use_adalora: config = AdaLoraConfig( init_r=args.init_r, target_r=args.target_r, beta1=0.85, beta2=0.85, tinit=args.tinit, tfinal=args.tfinal, deltaT=args.delta_t, lora_alpha=args.lora_alpha, lora_dropout=args.lora_dropout, target_modules=["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"], orth_reg_weight=args.orth_reg_weight, ) else: config = LoraConfig( r=args.r, lora_alpha=args.lora_alpha, target_modules=["q_proj", "v_proj"], lora_dropout=args.lora_dropout, ) model = get_peft_model(model, config) model.print_trainable_parameters() # optimizer optimizer = torch.optim.AdamW(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay) if args.max_train_steps is None: num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch else: args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # scheduler lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps, ) # Prepare everything with our `accelerator`. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) accelerator.print(model) # Note here that the max steps is adjusted by the accelerator's num_processes args.max_train_steps = math.ceil(args.max_train_steps / accelerator.num_processes) if args.use_peft and args.use_adalora: model.base_model.peft_config["default"].total_step = args.max_train_steps # model.base_model.peft_config.total_step = args.max_train_steps # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. 
if args.with_tracking: run_name = f"run-{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}" experiment_config = vars(args) # TensorBoard cannot log Enums, need the raw value experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value accelerator.init_trackers( "Whisper PEFT Fine-Tuning", config=experiment_config, init_kwargs={"wandb": {"name": run_name}} ) # saving and loading checkpoints for resuming training accelerator.register_save_state_pre_hook(save_model_hook) accelerator.register_load_state_pre_hook(load_model_hook) total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") # Only show the progress bar once on each machine. progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) global_step = 0 starting_epoch = 0 best_metric = None resume_step = 0 forced_decoder_ids = processor.get_decoder_prompt_ids(language=args.language, task=args.task) # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint) path = os.path.basename(args.resume_from_checkpoint) training_difference = os.path.splitext(path)[0] global_step = resume_step = int(training_difference.replace("step_", "")) starting_epoch = resume_step // len(train_dataloader) resume_step -= starting_epoch * len(train_dataloader) # We need to adjust the progress bar to the current step progress_bar.update(resume_step) for epoch in range(starting_epoch, args.num_train_epochs): model.train() if args.with_tracking: total_loss = 0 running_loss = 0 for step, batch in enumerate(accelerator.skip_first_batches(train_dataloader, num_batches=resume_step)): with accelerator.accumulate(model): outputs = model(**batch) loss = outputs.loss accelerator.backward(loss) optimizer.step() lr_scheduler.step() # Update the importance of low-rank matrices # and allocate the budget accordingly. # This is only needed for AdaLora. # Note that this requires parameter gradients. # Hence being called before optimizer.zero_grad(). 
if args.use_peft and args.use_adalora: model.update_and_allocate(global_step) optimizer.zero_grad() global_step += 1 progress_bar.update(1) if args.with_tracking: step_loss = accelerator.reduce(loss.detach().clone()).item() total_loss += step_loss running_loss += step_loss if global_step % args.checkpointing_steps == 0: output_dir = os.path.join(args.output_dir, f"step_{global_step}") accelerator.save_state(output_dir) if global_step % args.logging_steps == 0: if args.with_tracking: accelerator.log({"train/running_loss": running_loss / args.logging_steps}, step=global_step) running_loss = 0 if global_step % args.evaluation_steps == 0: eval_metrics = evaluation_loop( model, eval_dataloader, processor, normalizer, metric, forced_decoder_ids, accelerator ) if args.with_tracking: logger.info(f"Step {global_step} eval metrics: {eval_metrics}") accelerator.log(eval_metrics, step=global_step) if best_metric is None or eval_metrics["eval/wer"] < best_metric: best_metric = eval_metrics["eval/wer"] accelerator.save_state(os.path.join(args.output_dir, "best_checkpoint")) model.train() if global_step >= args.max_train_steps: break if args.with_tracking: train_epoch_loss = total_loss / (step + 1) logger.info(f"Epoch {epoch} train loss: {train_epoch_loss}") accelerator.log({"epoch/train_loss": train_epoch_loss}, step=epoch) if args.push_to_hub and epoch <= args.num_train_epochs - 1: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(args.output_dir, is_main_process=accelerator.is_main_process) # evaluate the model at the end of training eval_metrics = evaluation_loop( model, eval_dataloader, processor, normalizer, metric, forced_decoder_ids, accelerator ) if args.with_tracking: logger.info(f"Step {global_step} eval metrics: {eval_metrics}") accelerator.log(eval_metrics, step=global_step) if best_metric is None or eval_metrics["eval/wer"] < best_metric: best_metric = eval_metrics["eval/wer"] accelerator.save_state(os.path.join(args.output_dir, "best_checkpoint")) if accelerator.is_main_process: processor.tokenizer.save_pretrained(args.output_dir) repo.push_to_hub( commit_message=f"Training in progress epoch {epoch}", blocking=False, auto_lfs_prune=True ) if args.load_best_model: # load the best model accelerator.load_state(os.path.join(args.output_dir, "best_checkpoint")) model.resize_modules_by_rank_pattern(model.peft_config["default"].rank_pattern, "default") eval_metrics = evaluation_loop( model, eval_dataloader, processor, normalizer, metric, forced_decoder_ids, accelerator ) if args.with_tracking: best_metrics = {"best_" + k: v for k, v in eval_metrics.items()} accelerator.log(best_metrics, step=global_step) accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(args.output_dir, is_main_process=accelerator.is_main_process) if accelerator.is_main_process: processor.tokenizer.save_pretrained(args.output_dir) if args.push_to_hub: repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True) with open(os.path.join(args.output_dir, "all_results.json"), "w") as f: eval_metrics.pop("eval_samples") json.dump(eval_metrics, f) if __name__ == "__main__": main()
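# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): the minimal AdaLoRA
# wiring that the training loop above depends on, stripped of accelerate,
# 8-bit loading, streaming datasets, checkpointing and evaluation. The model
# name, step counts and dummy batch below are placeholder assumptions.
# ---------------------------------------------------------------------------
import torch
from transformers import WhisperForConditionalGeneration
from peft import AdaLoraConfig, get_peft_model


def adalora_training_sketch(max_train_steps: int = 100):
    base_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny")
    config = AdaLoraConfig(
        init_r=12,        # rank every adapted matrix starts with
        target_r=4,       # average rank left after budget reallocation
        tinit=10,         # warmup steps during which no pruning happens
        tfinal=20,        # final steps trained with a frozen budget
        deltaT=5,         # how often the rank allocation is updated
        lora_alpha=32,
        lora_dropout=0.1,
        target_modules=["q_proj", "v_proj"],
        total_step=max_train_steps,  # the script above instead assigns this after `accelerator.prepare`
    )
    model = get_peft_model(base_model, config)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)

    for step in range(max_train_steps):
        # dummy batch: real training feeds log-Mel features and tokenized transcripts
        loss = model(
            input_features=torch.randn(1, 80, 3000),
            labels=torch.tensor([[1, 2, 3]]),
        ).loss
        loss.backward()
        optimizer.step()
        # rank reallocation needs the parameter gradients, so it runs before zero_grad()
        model.update_and_allocate(step)
        optimizer.zero_grad()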
peft/examples/int8_training/peft_adalora_whisper_large_training.py/0
{ "file_path": "peft/examples/int8_training/peft_adalora_whisper_large_training.py", "repo_id": "peft", "token_count": 13114 }
178
compute_environment: LOCAL_MACHINE debug: false deepspeed_config: deepspeed_multinode_launcher: standard offload_optimizer_device: none offload_param_device: none zero3_init_flag: true zero3_save_16bit_model: true zero_stage: 3 distributed_type: DEEPSPEED downcast_bf16: 'no' machine_rank: 0 main_training_function: main mixed_precision: bf16 num_machines: 1 num_processes: 2 rdzv_backend: static same_network: true tpu_env: [] tpu_use_cluster: false tpu_use_sudo: false use_cpu: false
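# ---------------------------------------------------------------------------
# Illustrative note (not part of the original config): an accelerate config
# like this one is typically passed to `accelerate launch`, e.g.
#
#   accelerate launch --config_file deepspeed_config_z3_qlora.yaml train.py
#
# where `train.py` is a placeholder for whatever SFT training script is run.
# `num_processes: 2` above should match the number of GPUs on the machine.
# ---------------------------------------------------------------------------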
peft/examples/sft/configs/deepspeed_config_z3_qlora.yaml/0
{ "file_path": "peft/examples/sft/configs/deepspeed_config_z3_qlora.yaml", "repo_id": "peft", "token_count": 331 }
179
<jupyter_start><jupyter_text>IntroductionIn this notebook, we are going to fine-tune the LayoutLM model by Microsoft Research on the [FUNSD](https://guillaumejaume.github.io/FUNSD/) dataset, which is a collection of annotated form documents. The goal of our model is to learn the annotations of a number of labels ("question", "answer", "header" and "other") on those forms, such that it can be used to annotate unseen forms in the future.* Original LayoutLM paper: https://arxiv.org/abs/1912.13318* Original FUNSD paper: https://arxiv.org/abs/1905.13538 Install librariesCurrently you have to first install the `unilm` package, and then the `transformers` package (which updates the outdated `transformers` package that is included in the `unilm` package). The reason we also install the `unilm` package is because we need its preprocessing files. I've forked it, and removed some statements which introduced some issues.<jupyter_code># ! rm -r unilm # ! pip install unilm<jupyter_output><empty_output><jupyter_text>Getting the dataHere we download the data of the [FUNSD dataset](https://guillaumejaume.github.io/FUNSD/) from the web. This results in a directory called "data" being created, which has 2 subdirectories, one for training and one for testing. Each of those has 2 subdirectories in turn, one containing the images as png files and one containing the annotations in json format.<jupyter_code># ! wget https://guillaumejaume.github.io/FUNSD/dataset.zip # ! unzip dataset.zip && mv dataset data && rm -rf dataset.zip __MACOSX<jupyter_output><empty_output><jupyter_text>Let's take a look at a training example. For this, we are going to use PIL (Python Image Library).<jupyter_code>from PIL import Image, ImageDraw, ImageFont import os base_path = "/home/sourab/temp/data/dataset" image = Image.open(os.path.join(base_path, "training_data/images/0000971160.png")) image = image.convert("RGB") image<jupyter_output><empty_output><jupyter_text>Now let's plot its corresponding annotations. Basically, if you type `data['form']`, you get a list of all general annotations. Each general annotation has a label, a bounding box, and one or more words, which in also have their own bounding box. The bounding boxes are in [xleft, ytop, xright, ybottom] format.<jupyter_code>import json with open(os.path.join(base_path, "training_data/annotations/0000971160.json")) as f: data = json.load(f) for annotation in data["form"]: print(annotation)<jupyter_output><empty_output><jupyter_text>The PIL library has a handy ImageDraw module, which -you guessed it- allows to draw things (such as rectangles) on an image:<jupyter_code>draw = ImageDraw.Draw(image, "RGBA") font = ImageFont.load_default() label2color = {"question": "blue", "answer": "green", "header": "orange", "other": "violet"} for annotation in data["form"]: label = annotation["label"] general_box = annotation["box"] draw.rectangle(general_box, outline=label2color[label], width=2) draw.text((general_box[0] + 10, general_box[1] - 10), label, fill=label2color[label], font=font) words = annotation["words"] for word in words: box = word["box"] draw.rectangle(box, outline=label2color[label], width=1) image<jupyter_output><empty_output><jupyter_text>Preprocessing the dataNext, we need to turn the document images into individual tokens and corresponding labels (BIOES format, see further). We do this both for the training and test datasets. Make sure to run this from the `/content` directory:<jupyter_code># ! 
python unilm/layoutlm/examples/seq_labeling/preprocess.py --data_dir data/dataset/training_data/annotations \ # --data_split train \ # --output_dir data \ # --model_name_or_path microsoft/layoutlm-base-uncased \ # --max_len 510 # ! python unilm/layoutlm/examples/seq_labeling/preprocess.py --data_dir data/dataset/testing_data/annotations \ # --data_split test \ # --output_dir data \ # --model_name_or_path microsoft/layoutlm-base-uncased \ # --max_len 510<jupyter_output><empty_output><jupyter_text>Next, we create a labels.txt file that contains the unique labels of the FUNSD dataset:<jupyter_code># ! cat data/train.txt | cut -d$'\t' -f 2 | grep -v "^$"| sort | uniq > data/labels.txt<jupyter_output><empty_output><jupyter_text>Define a PyTorch datasetFirst, we create a list containing the unique labels based on `data/labels.txt` (run this from the content directory):<jupyter_code>from torch.nn import CrossEntropyLoss def get_labels(path): with open(path, "r") as f: labels = f.read().splitlines() if "O" not in labels: labels = ["O"] + labels return labels labels = get_labels("data/labels.txt") num_labels = len(labels) label_map = {i: label for i, label in enumerate(labels)} # Use cross entropy ignore index as padding label id so that only real label ids contribute to the loss later pad_token_label_id = CrossEntropyLoss().ignore_index<jupyter_output><empty_output><jupyter_text>We can see that the dataset uses the so-called BIOES annotation scheme to annotate the tokens. This means that a given token can be either at the beginning (B), inside (I), outside (O), at the end (E) or start (S) of a given entity. Entities include ANSWER, QUESTION, HEADER and OTHER:<jupyter_code>print(labels)<jupyter_output>['B-ANSWER', 'B-HEADER', 'B-QUESTION', 'E-ANSWER', 'E-HEADER', 'E-QUESTION', 'I-ANSWER', 'I-HEADER', 'I-QUESTION', 'O', 'S-ANSWER', 'S-HEADER', 'S-QUESTION']<jupyter_text>Next, we can create a PyTorch dataset and corresponding dataloader (both for training and evaluation):<jupyter_code>import logging import os import torch from torch.utils.data import Dataset logger = logging.getLogger(__name__) class FunsdDataset(Dataset): def __init__(self, args, tokenizer, labels, pad_token_label_id, mode): if args.local_rank not in [-1, 0] and mode == "train": torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Load data features from cache or dataset file cached_features_file = os.path.join( args.data_dir, "cached_{}_{}_{}".format( mode, list(filter(None, args.model_name_or_path.split("/"))).pop(), str(args.max_seq_length), ), ) if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) else: logger.info("Creating features from dataset file at %s", args.data_dir) examples = read_examples_from_file(args.data_dir, mode) features = convert_examples_to_features( examples, labels, args.max_seq_length, tokenizer, cls_token_at_end=bool(args.model_type in ["xlnet"]), # xlnet has a cls token at the end cls_token=tokenizer.cls_token, cls_token_segment_id=2 if args.model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token, sep_token_extra=bool(args.model_type in ["roberta"]), # roberta uses an extra separator b/w pairs of sentences, cf. 
github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805 pad_on_left=bool(args.model_type in ["xlnet"]), # pad on the left for xlnet pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0], pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0, pad_token_label_id=pad_token_label_id, ) # if args.local_rank in [-1, 0]: # logger.info("Saving features into cached file %s", cached_features_file) # torch.save(features, cached_features_file) if args.local_rank == 0 and mode == "train": torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache self.features = features # Convert to Tensors and build dataset self.all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) self.all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long) self.all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long) self.all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long) self.all_bboxes = torch.tensor([f.boxes for f in features], dtype=torch.long) def __len__(self): return len(self.features) def __getitem__(self, index): return ( self.all_input_ids[index], self.all_input_mask[index], self.all_segment_ids[index], self.all_label_ids[index], self.all_bboxes[index], ) class InputExample(object): """A single training/test example for token classification.""" def __init__(self, guid, words, labels, boxes, actual_bboxes, file_name, page_size): """Constructs a InputExample. Args: guid: Unique id for the example. words: list. The words of the sequence. labels: (Optional) list. The labels for each word of the sequence. This should be specified for train and dev examples, but not for test examples. 
""" self.guid = guid self.words = words self.labels = labels self.boxes = boxes self.actual_bboxes = actual_bboxes self.file_name = file_name self.page_size = page_size class InputFeatures(object): """A single set of features of data.""" def __init__( self, input_ids, input_mask, segment_ids, label_ids, boxes, actual_bboxes, file_name, page_size, ): assert ( 0 <= all(boxes) <= 1000 ), "Error with input bbox ({}): the coordinate value is not between 0 and 1000".format(boxes) self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.label_ids = label_ids self.boxes = boxes self.actual_bboxes = actual_bboxes self.file_name = file_name self.page_size = page_size def read_examples_from_file(data_dir, mode): file_path = os.path.join(data_dir, "{}.txt".format(mode)) box_file_path = os.path.join(data_dir, "{}_box.txt".format(mode)) image_file_path = os.path.join(data_dir, "{}_image.txt".format(mode)) guid_index = 1 examples = [] with open(file_path, encoding="utf-8") as f, open(box_file_path, encoding="utf-8") as fb, open( image_file_path, encoding="utf-8" ) as fi: words = [] boxes = [] actual_bboxes = [] file_name = None page_size = None labels = [] for line, bline, iline in zip(f, fb, fi): if line.startswith("-DOCSTART-") or line == "" or line == "\n": if words: examples.append( InputExample( guid="{}-{}".format(mode, guid_index), words=words, labels=labels, boxes=boxes, actual_bboxes=actual_bboxes, file_name=file_name, page_size=page_size, ) ) guid_index += 1 words = [] boxes = [] actual_bboxes = [] file_name = None page_size = None labels = [] else: splits = line.split("\t") bsplits = bline.split("\t") isplits = iline.split("\t") assert len(splits) == 2 assert len(bsplits) == 2 assert len(isplits) == 4 assert splits[0] == bsplits[0] words.append(splits[0]) if len(splits) > 1: labels.append(splits[-1].replace("\n", "")) box = bsplits[-1].replace("\n", "") box = [int(b) for b in box.split()] boxes.append(box) actual_bbox = [int(b) for b in isplits[1].split()] actual_bboxes.append(actual_bbox) page_size = [int(i) for i in isplits[2].split()] file_name = isplits[3].strip() else: # Examples could have no label for mode = "test" labels.append("O") if words: examples.append( InputExample( guid="%s-%d".format(mode, guid_index), words=words, labels=labels, boxes=boxes, actual_bboxes=actual_bboxes, file_name=file_name, page_size=page_size, ) ) return examples def convert_examples_to_features( examples, label_list, max_seq_length, tokenizer, cls_token_at_end=False, cls_token="[CLS]", cls_token_segment_id=1, sep_token="[SEP]", sep_token_extra=False, pad_on_left=False, pad_token=0, cls_token_box=[0, 0, 0, 0], sep_token_box=[1000, 1000, 1000, 1000], pad_token_box=[0, 0, 0, 0], pad_token_segment_id=0, pad_token_label_id=-1, sequence_a_segment_id=0, mask_padding_with_zero=True, ): """Loads a data file into a list of `InputBatch`s `cls_token_at_end` define the location of the CLS token: - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP] - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS] `cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet) """ label_map = {label: i for i, label in enumerate(label_list)} features = [] for ex_index, example in enumerate(examples): file_name = example.file_name page_size = example.page_size width, height = page_size if ex_index % 10000 == 0: logger.info("Writing example %d of %d", ex_index, len(examples)) tokens = [] token_boxes = [] actual_bboxes = [] label_ids = [] for word, 
label, box, actual_bbox in zip(example.words, example.labels, example.boxes, example.actual_bboxes): word_tokens = tokenizer.tokenize(word) tokens.extend(word_tokens) token_boxes.extend([box] * len(word_tokens)) actual_bboxes.extend([actual_bbox] * len(word_tokens)) # Use the real label id for the first token of the word, and padding ids for the remaining tokens label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1)) # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa. special_tokens_count = 3 if sep_token_extra else 2 if len(tokens) > max_seq_length - special_tokens_count: tokens = tokens[: (max_seq_length - special_tokens_count)] token_boxes = token_boxes[: (max_seq_length - special_tokens_count)] actual_bboxes = actual_bboxes[: (max_seq_length - special_tokens_count)] label_ids = label_ids[: (max_seq_length - special_tokens_count)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. tokens += [sep_token] token_boxes += [sep_token_box] actual_bboxes += [[0, 0, width, height]] label_ids += [pad_token_label_id] if sep_token_extra: # roberta uses an extra separator b/w pairs of sentences tokens += [sep_token] token_boxes += [sep_token_box] actual_bboxes += [[0, 0, width, height]] label_ids += [pad_token_label_id] segment_ids = [sequence_a_segment_id] * len(tokens) if cls_token_at_end: tokens += [cls_token] token_boxes += [cls_token_box] actual_bboxes += [[0, 0, width, height]] label_ids += [pad_token_label_id] segment_ids += [cls_token_segment_id] else: tokens = [cls_token] + tokens token_boxes = [cls_token_box] + token_boxes actual_bboxes = [[0, 0, width, height]] + actual_bboxes label_ids = [pad_token_label_id] + label_ids segment_ids = [cls_token_segment_id] + segment_ids input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) # Zero-pad up to the sequence length. 
padding_length = max_seq_length - len(input_ids) if pad_on_left: input_ids = ([pad_token] * padding_length) + input_ids input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids label_ids = ([pad_token_label_id] * padding_length) + label_ids token_boxes = ([pad_token_box] * padding_length) + token_boxes else: input_ids += [pad_token] * padding_length input_mask += [0 if mask_padding_with_zero else 1] * padding_length segment_ids += [pad_token_segment_id] * padding_length label_ids += [pad_token_label_id] * padding_length token_boxes += [pad_token_box] * padding_length assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length assert len(label_ids) == max_seq_length assert len(token_boxes) == max_seq_length if ex_index < 5: logger.info("*** Example ***") logger.info("guid: %s", example.guid) logger.info("tokens: %s", " ".join([str(x) for x in tokens])) logger.info("input_ids: %s", " ".join([str(x) for x in input_ids])) logger.info("input_mask: %s", " ".join([str(x) for x in input_mask])) logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids])) logger.info("label_ids: %s", " ".join([str(x) for x in label_ids])) logger.info("boxes: %s", " ".join([str(x) for x in token_boxes])) logger.info("actual_bboxes: %s", " ".join([str(x) for x in actual_bboxes])) features.append( InputFeatures( input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_ids=label_ids, boxes=token_boxes, actual_bboxes=actual_bboxes, file_name=file_name, page_size=page_size, ) ) return features from transformers import LayoutLMTokenizer # from .unilm.layoutlm.data.funsd import FunsdDataset, InputFeatures from torch.utils.data import DataLoader, RandomSampler, SequentialSampler batch_size = 16 args = { "local_rank": -1, "overwrite_cache": True, "data_dir": "/home/sourab/temp/data/", "model_name_or_path": "microsoft/layoutlm-base-uncased", "max_seq_length": 512, "model_type": "layoutlm", } # class to turn the keys of a dict into attributes (thanks Stackoverflow) class AttrDict(dict): def __init__(self, *args, **kwargs): super(AttrDict, self).__init__(*args, **kwargs) self.__dict__ = self args = AttrDict(args) tokenizer = LayoutLMTokenizer.from_pretrained("microsoft/layoutlm-base-uncased") # the LayoutLM authors already defined a specific FunsdDataset, so we are going to use this here train_dataset = FunsdDataset(args, tokenizer, labels, pad_token_label_id, mode="train") train_sampler = RandomSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=batch_size) eval_dataset = FunsdDataset(args, tokenizer, labels, pad_token_label_id, mode="test") eval_sampler = SequentialSampler(eval_dataset) eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=batch_size) len(train_dataloader) len(eval_dataloader) batch = next(iter(train_dataloader)) input_ids = batch[0][0] tokenizer.decode(input_ids)<jupyter_output><empty_output><jupyter_text>Define and fine-tune the modelAs this is a sequence labeling task, we are going to load `LayoutLMForTokenClassification` (the base sized model) from the hub. 
We are going to fine-tune it on a downstream task, namely FUNSD.<jupyter_code>from peft import get_peft_config, PeftModel, get_peft_model, LoraConfig, TaskType peft_config = LoraConfig( task_type=TaskType.TOKEN_CLS, inference_mode=False, r=16, lora_alpha=16, lora_dropout=0.1, bias="all" ) peft_config from transformers import LayoutLMForTokenClassification import torch from transformers import set_seed seed = 100 set_seed(seed) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model = LayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=num_labels) model = get_peft_model(model, peft_config) model.to(device) print(model.model.layoutlm.encoder.layer[0].attention.self.query.weight) print(model.model.layoutlm.encoder.layer[0].attention.self.query.lora_A.weight) print(model.model.classifier.weight)<jupyter_output><empty_output><jupyter_text>Now we can start training:<jupyter_code>from transformers import AdamW, get_linear_schedule_with_warmup from tqdm import tqdm num_train_epochs = 100 optimizer = torch.optim.AdamW(model.parameters(), lr=3e-3) lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=0.06 * (len(train_dataloader) * num_train_epochs), num_training_steps=(len(train_dataloader) * num_train_epochs), ) global_step = 0 t_total = len(train_dataloader) * num_train_epochs # total number of training steps # put the model in training mode model.train() for epoch in range(num_train_epochs): for batch in tqdm(train_dataloader, desc="Training"): input_ids = batch[0].to(device) bbox = batch[4].to(device) attention_mask = batch[1].to(device) token_type_ids = batch[2].to(device) labels = batch[3].to(device) # forward pass outputs = model( input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels ) loss = outputs.loss # print loss every 100 steps if global_step % 10 == 0: print(f"Loss after {global_step} steps: {loss.item()}") # backward pass to get the gradients loss.backward() # print("Gradients on classification head:") # print(model.classifier.weight.grad[6,:].sum()) # update optimizer.step() lr_scheduler.step() optimizer.zero_grad() global_step += 1 import numpy as np from seqeval.metrics import ( classification_report, f1_score, precision_score, recall_score, ) eval_loss = 0.0 nb_eval_steps = 0 preds = None out_label_ids = None # put model in evaluation mode model.eval() for batch in tqdm(eval_dataloader, desc="Evaluating"): with torch.no_grad(): input_ids = batch[0].to(device) bbox = batch[4].to(device) attention_mask = batch[1].to(device) token_type_ids = batch[2].to(device) labels = batch[3].to(device) # forward pass outputs = model( input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels ) # get the loss and logits tmp_eval_loss = outputs.loss logits = outputs.logits eval_loss += tmp_eval_loss.item() nb_eval_steps += 1 # compute the predictions if preds is None: preds = logits.detach().cpu().numpy() out_label_ids = labels.detach().cpu().numpy() else: preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) out_label_ids = np.append(out_label_ids, labels.detach().cpu().numpy(), axis=0) # compute average evaluation loss eval_loss = eval_loss / nb_eval_steps preds = np.argmax(preds, axis=2) out_label_list = [[] for _ in range(out_label_ids.shape[0])] preds_list = [[] for _ in range(out_label_ids.shape[0])] for i in range(out_label_ids.shape[0]): for j in range(out_label_ids.shape[1]): if 
out_label_ids[i, j] != pad_token_label_id: out_label_list[i].append(label_map[out_label_ids[i][j]]) preds_list[i].append(label_map[preds[i][j]]) results = { "loss": eval_loss, "precision": precision_score(out_label_list, preds_list), "recall": recall_score(out_label_list, preds_list), "f1": f1_score(out_label_list, preds_list), } print(results) model.print_trainable_parameters() model.save_pretrained("peft_layoutlm") !du -h "peft_layoutlm/adapter_model.bin"<jupyter_output>2,8M layoutlm_funsd.pt
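<jupyter_text>As a rough sketch (not part of the original notebook), the adapter saved to "peft_layoutlm" above can later be re-attached to a freshly loaded base model for inference. Only the directory name and the `num_labels` variable come from earlier cells; the rest is an assumption.<jupyter_code>from peft import PeftModel
from transformers import LayoutLMForTokenClassification

base_model = LayoutLMForTokenClassification.from_pretrained(
    "microsoft/layoutlm-base-uncased", num_labels=num_labels
)
# attach the LoRA weights that were written to "peft_layoutlm" above
inference_model = PeftModel.from_pretrained(base_model, "peft_layoutlm")
inference_model.eval()<jupyter_output><empty_output>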
peft/examples/token_classification/peft_lora_token_cls.ipynb/0
{ "file_path": "peft/examples/token_classification/peft_lora_token_cls.ipynb", "repo_id": "peft", "token_count": 11949 }
180
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from typing import Any, List, Optional import torch import torch.nn as nn from transformers.pytorch_utils import Conv1D from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge from peft.utils import transpose class IA3Layer(BaseTunerLayer): # All names of layers that may contain adapter weights adapter_layer_names = ("ia3_l",) def __init__(self, base_layer: nn.Module, is_feedforward: bool, **kwargs) -> None: self.base_layer = base_layer self.ia3_l = nn.ParameterDict({}) # Mark the weight as unmerged self._disable_adapters = False self.merged_adapters = [] self.is_feedforward = is_feedforward base_layer = self.get_base_layer() if isinstance(base_layer, nn.Linear): in_features, out_features = base_layer.in_features, base_layer.out_features elif isinstance(base_layer, nn.Conv2d): in_features, out_features = base_layer.in_channels, base_layer.out_channels elif isinstance(base_layer, nn.Embedding): in_features, out_features = base_layer.num_embeddings, base_layer.embedding_dim elif isinstance(base_layer, Conv1D): in_features, out_features = ( base_layer.weight.ds_shape if hasattr(base_layer.weight, "ds_shape") else base_layer.weight.shape ) else: raise ValueError(f"Unsupported layer type {type(base_layer)}") self.in_features = in_features self.out_features = out_features def update_layer(self, adapter_name, init_ia3_weights): # This code works for linear layers, override for other layer types # Actual trainable parameters if self.is_feedforward: weight = torch.randn((1, self.in_features)) else: weight = torch.randn((self.out_features, 1)) self.ia3_l[adapter_name] = nn.Parameter(weight) if init_ia3_weights: self.reset_ia3_parameters(adapter_name) self.to(self.get_base_layer().weight.device) self.set_adapter(self.active_adapters) def reset_ia3_parameters(self, adapter_name): if adapter_name in self.ia3_l.keys(): # initialize learned vector with torch.ones nn.init.constant_(self.ia3_l[adapter_name], 1.0) class Linear(nn.Module, IA3Layer): # (IA)^3 implemented in a dense layer def __init__( self, base_layer: nn.Module, adapter_name: str, fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight like (fan_in, fan_out) is_feedforward: bool = False, # Set to True if the layer is treated as a feedforward layer is_target_conv_1d_layer: bool = False, # whether target module is a conv1d layer. 
useful while unloading later init_ia3_weights: bool = True, # whether to initialize IA3 weights **kwargs, ) -> None: super().__init__() IA3Layer.__init__(self, base_layer, is_feedforward=is_feedforward) self.fan_in_fan_out = fan_in_fan_out self.is_target_conv_1d_layer = is_target_conv_1d_layer self._active_adapter = adapter_name self.update_layer(adapter_name, init_ia3_weights) def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None: """ Merge the active adapter weights into the base weights Args: safe_merge (`bool`, *optional*): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ adapter_names = check_adapters_to_merge(self, adapter_names) if not adapter_names: # no adapter to merge return for active_adapter in adapter_names: if active_adapter in self.ia3_l.keys(): base_layer = self.get_base_layer() ia3_l = transpose(self.ia3_l[active_adapter].data, self.fan_in_fan_out) if safe_merge: orig_weights = base_layer.weight.data orig_weights = torch.mul(orig_weights, ia3_l) if not torch.isfinite(orig_weights).all(): raise ValueError( f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" ) base_layer.weight.data = orig_weights else: base_layer.weight.data = torch.mul(base_layer.weight.data, ia3_l) if not self.is_feedforward and (base_layer.bias is not None): scaling = self.ia3_l[active_adapter].reshape(base_layer.bias.shape) base_layer.bias.data = torch.mul(base_layer.bias.data, scaling.data) self.merged_adapters.append(active_adapter) def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. """ if not self.merged: warnings.warn("Already unmerged. Nothing to do.") return warnings.warn("Unmerge result can be inaccurate for (IA)^3.") while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter in self.ia3_l.keys(): base_layer = self.get_base_layer() # Add tolerace to avoid division by zero ia3_l = transpose(self.ia3_l[active_adapter].data, self.fan_in_fan_out) + 1e-8 base_layer.weight.data = torch.div(base_layer.weight.data, ia3_l) if not self.is_feedforward and (base_layer.bias is not None): scaling = self.ia3_l[active_adapter].reshape(base_layer.bias.shape) base_layer.bias.data = torch.div(base_layer.bias.data, scaling.data + 1e-8) def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: dtype = previous_dtype = x.dtype if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: ia3_scaling = 1 for active_adapter in self.active_adapters: if active_adapter not in self.ia3_l.keys(): continue dtype = self.ia3_l[active_adapter].dtype ia3_scaling *= self.ia3_l[active_adapter].flatten() if self.is_feedforward: x = x.to(dtype) # TODO: weight.dtype can be != self.ia3_l[self.active_adapters].dtype # e.g. bf16 vs fp32. Is that okay? 
interm = (x * ia3_scaling).to(self.get_base_layer().weight.dtype) result = self.base_layer(interm, *args, **kwargs) else: result = self.base_layer(x, *args, **kwargs) result = result.to(dtype) * ia3_scaling result = result.to(previous_dtype) return result class Conv2d(nn.Module, IA3Layer): def __init__( self, base_layer: nn.Module, adapter_name: str, fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight like (fan_in, fan_out) is_feedforward: bool = False, # Set to True if the layer is treated as a feedforward layer init_ia3_weights: bool = True, **kwargs, ) -> None: super().__init__() IA3Layer.__init__(self, base_layer, is_feedforward=is_feedforward) self.fan_in_fan_out = fan_in_fan_out self._active_adapter = adapter_name self.update_layer(adapter_name, init_ia3_weights) def update_layer(self, adapter_name, init_ia3_weights): # Actual trainable parameters if self.is_feedforward: weight = torch.randn((1, self.in_features, 1, 1)) else: weight = torch.randn((1, self.out_features, 1, 1)) self.ia3_l[adapter_name] = nn.Parameter(weight) if init_ia3_weights: self.reset_ia3_parameters(adapter_name) self.to(self.get_base_layer().weight.device) self.set_adapter(self.active_adapters) def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None: """ Merge the active adapter weights into the base weights Args: safe_merge (`bool`, *optional*): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ adapter_names = check_adapters_to_merge(self, adapter_names) if not adapter_names: # no adapter to merge return for active_adapter in adapter_names: if active_adapter in self.ia3_l.keys(): base_layer = self.get_base_layer() ia3_scaling = self.ia3_l[active_adapter].data if not self.is_feedforward: ia3_scaling = ia3_scaling.permute(1, 0, 2, 3) if safe_merge: output_weight = torch.mul(base_layer.weight.data, ia3_scaling).clone() if not torch.isfinite(output_weight).all(): raise ValueError( f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" ) base_layer.weight.data = output_weight else: base_layer.weight.data = torch.mul(base_layer.weight.data, ia3_scaling) if not self.is_feedforward and (base_layer.bias is not None): scaling = self.ia3_l[active_adapter].reshape(base_layer.bias.shape) base_layer.bias.data = torch.mul(base_layer.bias.data, scaling.data) self.merged_adapters.append(active_adapter) def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. """ if not self.merged: warnings.warn("Already unmerged. Nothing to do.") return warnings.warn("Unmerge result can be inaccurate for (IA)^3.") while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter in self.ia3_l.keys(): base_layer = self.get_base_layer() # divide by (IA)^3 vector. 
Add tolerace to avoid division by zero ia3_scaling = self.ia3_l[active_adapter].data if not self.is_feedforward: ia3_scaling = ia3_scaling.permute(1, 0, 2, 3) base_layer.weight.data = torch.div(base_layer.weight.data, ia3_scaling + 1e-8) if not self.is_feedforward and (base_layer.bias is not None): scaling = self.ia3_l[active_adapter].reshape(base_layer.bias.shape) base_layer.bias.data = torch.mul(base_layer.bias.data, scaling.data) def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: dtype = previous_dtype = x.dtype if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: ia3_scaling = 1 for active_adapter in self.active_adapters: if active_adapter not in self.ia3_l.keys(): continue dtype = self.ia3_l[active_adapter].dtype ia3_scaling *= self.ia3_l[active_adapter] if self.is_feedforward: x = x.to(dtype) # TODO: weight.dtype can be != self.ia3_l[self.active_adapters].dtype # e.g. bf16 vs fp32. Is that okay? interm = (x * ia3_scaling).to(self.get_base_layer().weight.dtype) result = self.base_layer(interm, *args, **kwargs) else: result = self.base_layer(x, *args, **kwargs) result = result.to(dtype) * ia3_scaling result = result.to(previous_dtype) return result
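# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this module): the Linear/Conv2d layers
# above are normally created indirectly via `IA3Config` + `get_peft_model`
# rather than instantiated by hand. The model and module names below are
# placeholder assumptions chosen for a small causal LM.
# ---------------------------------------------------------------------------
import torch
from transformers import AutoModelForCausalLM
from peft import IA3Config, get_peft_model

base = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
config = IA3Config(
    task_type="CAUSAL_LM",
    target_modules=["k_proj", "v_proj", "fc2"],  # layers that receive an `ia3_l` scaling vector
    feedforward_modules=["fc2"],                 # wrapped with `is_feedforward=True` (scales the input)
)
model = get_peft_model(base, config)
model.print_trainable_parameters()

# `merge_adapter` folds the learned vectors into the base weights via `merge()` above;
# `unmerge_adapter` divides them back out (with the small tolerance noted above).
model.merge_adapter()
logits = model(input_ids=torch.tensor([[1, 2, 3]])).logits
model.unmerge_adapter()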
peft/src/peft/tuners/ia3/layer.py/0
{ "file_path": "peft/src/peft/tuners/ia3/layer.py", "repo_id": "peft", "token_count": 6410 }
181
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import math import warnings from typing import Any, Optional, Union import torch import torch.nn as nn import torch.nn.functional as F from transformers.pytorch_utils import Conv1D from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge from peft.utils.integrations import dequantize_bnb_weight, gather_params_ctx from peft.utils.other import transpose from .config import LoraConfig class LoraLayer(BaseTunerLayer): # All names of layers that may contain (trainable) adapter weights adapter_layer_names = ("lora_A", "lora_B", "lora_embedding_A", "lora_embedding_B") # All names of other parameters that may contain adapter-related parameters other_param_names = ("r", "lora_alpha", "scaling", "lora_dropout") def __init__(self, base_layer: nn.Module, **kwargs) -> None: self.base_layer = base_layer self.r = {} self.lora_alpha = {} self.scaling = {} self.lora_dropout = nn.ModuleDict({}) self.lora_A = nn.ModuleDict({}) self.lora_B = nn.ModuleDict({}) # For Embedding layer self.lora_embedding_A = nn.ParameterDict({}) self.lora_embedding_B = nn.ParameterDict({}) # Mark the weight as unmerged self._disable_adapters = False self.merged_adapters = [] self.use_dora: dict[str, bool] = {} self.lora_magnitude_vector: Optional[torch.nn.ParameterDict] = None # for DoRA self._caches: dict[str, Any] = {} self.kwargs = kwargs base_layer = self.get_base_layer() if isinstance(base_layer, nn.Linear): in_features, out_features = base_layer.in_features, base_layer.out_features elif isinstance(base_layer, nn.Conv2d): in_features, out_features = base_layer.in_channels, base_layer.out_channels elif isinstance(base_layer, nn.Embedding): in_features, out_features = base_layer.num_embeddings, base_layer.embedding_dim elif isinstance(base_layer, Conv1D): in_features, out_features = ( base_layer.weight.ds_shape if hasattr(base_layer.weight, "ds_shape") else base_layer.weight.shape ) elif hasattr(base_layer, "infeatures") and hasattr(base_layer, "outfeatures"): # QuantLinear in_features, out_features = base_layer.infeatures, base_layer.outfeatures elif hasattr(base_layer, "input_size") and hasattr(base_layer, "output_size"): # Megatron ColumnParallelLinear,RowParallelLinear in_features, out_features = base_layer.input_size, base_layer.output_size elif hasattr(base_layer, "codebooks") and base_layer.__class__.__name__ == "QuantizedLinear": # AQLM QuantLinear in_features, out_features = base_layer.in_features, base_layer.out_features elif hasattr(base_layer, "w_bit") and base_layer.__class__.__name__ == "WQLinear_GEMM": # Awq layers in_features, out_features = base_layer.in_features, base_layer.out_features else: raise ValueError(f"Unsupported layer type {type(base_layer)}") self.in_features = in_features self.out_features = out_features def update_layer( self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora, use_dora: bool = False ): # This code works for linear 
layers, override for other layer types if r <= 0: raise ValueError(f"`r` should be a positive integer value but the value passed is {r}") self.r[adapter_name] = r self.lora_alpha[adapter_name] = lora_alpha if lora_dropout > 0.0: lora_dropout_layer = nn.Dropout(p=lora_dropout) else: lora_dropout_layer = nn.Identity() self.lora_dropout.update(nn.ModuleDict({adapter_name: lora_dropout_layer})) # Actual trainable parameters self.lora_A[adapter_name] = nn.Linear(self.in_features, r, bias=False) self.lora_B[adapter_name] = nn.Linear(r, self.out_features, bias=False) if use_rslora: self.scaling[adapter_name] = lora_alpha / math.sqrt(r) else: self.scaling[adapter_name] = lora_alpha / r if init_lora_weights == "loftq": self.loftq_init(adapter_name) elif init_lora_weights: self.reset_lora_parameters(adapter_name, init_lora_weights) # check weight and qweight (for GPTQ) for weight_name in ("weight", "qweight"): weight = getattr(self.get_base_layer(), weight_name, None) if weight is not None: # the layer is already completely initialized, this is an update if weight.dtype.is_floating_point or weight.dtype.is_complex: self.to(weight.device, dtype=weight.dtype) else: self.to(weight.device) break if use_dora: self.dora_init(adapter_name) self.use_dora[adapter_name] = True else: self.use_dora[adapter_name] = False self.set_adapter(self.active_adapters) def reset_lora_parameters(self, adapter_name, init_lora_weights): if init_lora_weights is False: return if adapter_name in self.lora_A.keys(): if init_lora_weights is True: # initialize A the same way as the default for nn.Linear and B to zero # https://github.com/microsoft/LoRA/blob/a0a92e0f26c067cf94747bdbf1ce73793fa44d19/loralib/layers.py#L124 nn.init.kaiming_uniform_(self.lora_A[adapter_name].weight, a=math.sqrt(5)) elif init_lora_weights.lower() == "gaussian": nn.init.normal_(self.lora_A[adapter_name].weight, std=1 / self.r[adapter_name]) else: raise ValueError(f"Unknown initialization {init_lora_weights=}") nn.init.zeros_(self.lora_B[adapter_name].weight) if adapter_name in self.lora_embedding_A.keys(): # initialize a the same way as the default for nn.linear and b to zero nn.init.zeros_(self.lora_embedding_A[adapter_name]) nn.init.normal_(self.lora_embedding_B[adapter_name]) def loftq_init(self, adapter_name): from peft.utils.loftq_utils import loftq_init weight = self.get_base_layer().weight kwargs = { "num_bits": self.kwargs.get("loftq_bits", 4), "reduced_rank": self.r[adapter_name], "num_iter": self.kwargs.get("loftq_iter", 1), } qweight, lora_A, lora_B = loftq_init(weight, **kwargs) if adapter_name in self.lora_A.keys(): # initialize A the same way as the default for nn.Linear and B to zero self.lora_A[adapter_name].weight.data = lora_A self.lora_B[adapter_name].weight.data = lora_B if adapter_name in self.lora_embedding_A.keys(): # initialize a the same way as the default for nn.linear and b to zero self.lora_embedding_A[adapter_name].weight.data = lora_A self.lora_embedding_B[adapter_name].weight.data = lora_B self.get_base_layer().weight.data = qweight def _get_weight_norm(self, weight, lora_weight, scaling) -> torch.Tensor: # calculate L2 norm of weight matrix, column-wise weight = weight + scaling * lora_weight weight_norm = torch.linalg.norm(weight, dim=1).to(weight.dtype) return weight_norm def dora_init(self, adapter_name: str) -> None: lora_A = self.lora_A[adapter_name] lora_B = self.lora_B[adapter_name] scaling = self.scaling[adapter_name] with gather_params_ctx(self.get_base_layer()): weight = self.get_base_layer().weight quant_state = 
getattr(self.get_base_layer(), "state", None) weight = dequantize_bnb_weight(weight, state=quant_state) # no-op if not bnb if weight.data.ndim == 4: # For handling LoRAs applied to Conv2Ds. lora_weight = torch.mm(lora_B.weight.flatten(start_dim=1), lora_A.weight.flatten(start_dim=1)) lora_weight = lora_weight.reshape(weight.shape) else: lora_weight = lora_B.weight @ lora_A.weight weight_norm = self._get_weight_norm(weight, lora_weight, scaling) self.lora_magnitude_vector = nn.ParameterDict() self.lora_magnitude_vector[adapter_name] = nn.Parameter(weight_norm, requires_grad=True) # add lora_magnitude_vector to the list of learnable parameters self.adapter_layer_names = self.adapter_layer_names[:] + ("lora_magnitude_vector",) def _cache_store(self, key: str, value: Any) -> None: self._caches[key] = value def _cache_pop(self, key: str) -> Any: value = self._caches.pop(key) return value def _apply_dora(self, x, lora_A, lora_B, scaling, active_adapter): """ For DoRA, calculate the extra output from LoRA with DoRA applied. This should be added on top of the base layer output. """ lora_weight = lora_B.weight @ lora_A.weight magnitude = self.lora_magnitude_vector[active_adapter] weight = self.get_base_layer().weight quant_state = getattr(self.get_base_layer(), "state", None) weight = dequantize_bnb_weight(weight, state=quant_state) # no-op if not bnb weight = weight.to(x.dtype) weight_norm = self._get_weight_norm(weight, lora_weight, scaling) # see section 4.3 of DoRA (https://arxiv.org/abs/2402.09353) # "[...] we suggest treating ||V +∆V ||_c in # Eq. (5) as a constant, thereby detaching it from the gradient # graph. This means that while ||V + ∆V ||_c dynamically # reflects the updates of ∆V , it won’t receive any gradient # during backpropagation" weight_norm = weight_norm.detach() mag_norm_scale = (magnitude / weight_norm).view(1, -1) result_dora = (mag_norm_scale - 1) * ( F.linear(x, transpose(weight, self.fan_in_fan_out)) ) + mag_norm_scale * lora_B(lora_A(x)) * scaling # Note: Computation could potentially be accelerated by using the code below instead of calculating X@W again. 
# This is only correct if dropout=0, otherwise results will differ: # https://github.com/huggingface/peft/pull/1474#issuecomment-1964682771 # bias = self.get_base_layer().bias # if bias is not None: # result = result - bias # result = mag_norm_scale * result + mag_norm_scale * lora_B(lora_A(x)) * scaling # if bias is not None: # result = result + bias return result_dora def set_scale(self, adapter, scale): if adapter not in self.scaling: # Ignore the case where the adapter is not in the layer return self.scaling[adapter] = scale * self.lora_alpha[adapter] / self.r[adapter] def scale_layer(self, scale: float) -> None: if scale == 1: return for active_adapter in self.active_adapters: if active_adapter not in self.lora_A.keys(): continue self.scaling[active_adapter] *= scale def unscale_layer(self, scale=None) -> None: for active_adapter in self.active_adapters: if active_adapter not in self.lora_A.keys(): continue if scale is None: self.scaling[active_adapter] = self.lora_alpha[active_adapter] / self.r[active_adapter] else: self.scaling[active_adapter] /= scale def _check_forward_args(self, x, *args, **kwargs): """Check if the arguments are compatible with the configs and state of the model""" adapter_names = kwargs.get("adapter_names", None) if adapter_names is None: return if len(x) != len(adapter_names): msg = ( "Length of `adapter_names` should be the same as the number of inputs, but got " f"{len(adapter_names)} and {len(x)} respectively." ) raise ValueError(msg) if self.merged: # It is unclear what would be the right thing to do if users pass adapter_names and there are merged # adapters. Therefore, it is better to raise an error in this case. msg = "Cannot pass `adapter_names` when there are merged adapters, please call `unmerge_adapter` first." raise ValueError(msg) unique_adapters = set(self.active_adapters) for adapter_name in unique_adapters: if self.use_dora.get(adapter_name, False): msg = "Cannot pass `adapter_names` when DoRA is enabled." raise ValueError(msg) def _mixed_batch_forward( self, x: torch.Tensor, *args: Any, adapter_names: list[str], **kwargs: Any ) -> torch.Tensor: # This is a special method that handles the case when users pass the argument `adapter_names`. This is an # extra argument that allows mixing different adapters in the same batch at inference time. result = self.base_layer(x, *args, **kwargs) torch_result_dtype = result.dtype unique_adapters = set(adapter_names) sub_batch_indices_list = [] for adapter in unique_adapters: sub_batch_indices_list.append([index for index, item in enumerate(adapter_names) if item == adapter]) for i, active_adapter in enumerate(unique_adapters): if active_adapter == "__base__": continue if active_adapter not in self.lora_A.keys(): continue lora_A = self.lora_A[active_adapter] lora_B = self.lora_B[active_adapter] dropout = self.lora_dropout[active_adapter] scaling = self.scaling[active_adapter] # getting the sub-batch, passing it to LoRA layers and updating the corresponding indices of the linear # layer output sub_batch = x[sub_batch_indices_list[i]].to(lora_A.weight.dtype) lora_output = lora_B(lora_A(dropout(sub_batch))) * scaling result[sub_batch_indices_list[i]] += lora_output.to(torch_result_dtype) return result # Below code is based on https://github.com/microsoft/LoRA/blob/main/loralib/layers.py # and modified to work with PyTorch FSDP # ------------------------------------------------------------------------------------------ # Copyright (c) Microsoft Corporation. All rights reserved. 
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information. # ------------------------------------------------------------------------------------------ class Linear(nn.Module, LoraLayer): # Lora implemented in a dense layer def __init__( self, base_layer, adapter_name: str, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight like (fan_in, fan_out) is_target_conv_1d_layer: bool = False, init_lora_weights: Union[bool, str] = True, use_rslora: bool = False, use_dora: bool = False, **kwargs, ) -> None: super().__init__() LoraLayer.__init__(self, base_layer, **kwargs) self.fan_in_fan_out = fan_in_fan_out self._active_adapter = adapter_name self.update_layer( adapter_name, r, lora_alpha=lora_alpha, lora_dropout=lora_dropout, init_lora_weights=init_lora_weights, use_rslora=use_rslora, use_dora=use_dora, ) self.is_target_conv_1d_layer = is_target_conv_1d_layer def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: """ Merge the active adapter weights into the base weights Args: safe_merge (`bool`, *optional*): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`list[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ adapter_names = check_adapters_to_merge(self, adapter_names) if not adapter_names: # no adapter to merge return for active_adapter in adapter_names: if active_adapter in self.lora_A.keys(): base_layer = self.get_base_layer() if safe_merge: # Note that safe_merge will be slower than the normal merge # because of the copy operation. orig_weights = base_layer.weight.data.clone() delta_weight = self.get_delta_weight(active_adapter) if not self.use_dora[active_adapter]: orig_weights = orig_weights + delta_weight else: # handle dora # since delta_weight already includes scaling, set it to 1 here weight_norm = self._get_weight_norm(orig_weights, delta_weight, scaling=1).detach() # We need to cache weight_norm because it has to be based on the original weights. We # cannot calculate it on the fly based on the merged weights when unmerging because its a # different value self._cache_store(f"{active_adapter}-weight_norm", weight_norm) dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm orig_weights = dora_factor.view(-1, 1) * (orig_weights + delta_weight) if not torch.isfinite(orig_weights).all(): raise ValueError( f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" ) base_layer.weight.data = orig_weights else: delta_weight = self.get_delta_weight(active_adapter) if not self.use_dora[active_adapter]: base_layer.weight.data = base_layer.weight.data + delta_weight else: # handle dora # since delta_weight already includes scaling, set it to 1 here weight_norm = self._get_weight_norm(base_layer.weight, delta_weight, scaling=1).detach() # We need to cache weight_norm because it has to be based on the original weights. 
We # cannot calculate it on the fly based on the merged weights when unmerging because its a # different value self._cache_store(f"{active_adapter}-weight_norm", weight_norm) dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm new_weight = dora_factor.view(-1, 1) * (base_layer.weight.data + delta_weight) base_layer.weight.data = new_weight self.merged_adapters.append(active_adapter) def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. """ if not self.merged: warnings.warn("Already unmerged. Nothing to do.") return while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter in self.lora_A.keys(): weight = self.get_base_layer().weight delta_weight = self.get_delta_weight(active_adapter) if not self.use_dora[active_adapter]: weight.data -= delta_weight else: weight_norm = self._cache_pop(f"{active_adapter}-weight_norm") dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm weight_orig = weight.data / dora_factor.view(-1, 1) - delta_weight weight.data = weight_orig def get_delta_weight(self, adapter) -> torch.Tensor: """ Compute the delta weight for the given adapter. Args: adapter (str): The name of the adapter for which the delta weight should be computed. """ device = self.lora_B[adapter].weight.device dtype = self.lora_B[adapter].weight.dtype # In case users wants to merge the adapter weights that are in # float16 while being on CPU, we need to cast the weights to float32, perform the merge and then cast back to # float16 because the `@` and matmul operation in general is not supported in torch + cpu + fp16. cast_to_fp32 = device.type == "cpu" and dtype == torch.float16 weight_A = self.lora_A[adapter].weight weight_B = self.lora_B[adapter].weight if cast_to_fp32: weight_A = weight_A.float() weight_B = weight_B.float() output_tensor = transpose(weight_B @ weight_A, self.fan_in_fan_out) * self.scaling[adapter] if cast_to_fp32: output_tensor = output_tensor.to(dtype=dtype) # cast back the weights self.lora_A[adapter].weight.data = weight_A.to(dtype) self.lora_B[adapter].weight.data = weight_B.to(dtype) return output_tensor def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: self._check_forward_args(x, *args, **kwargs) adapter_names = kwargs.pop("adapter_names", None) if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif adapter_names is not None: result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: result = self.base_layer(x, *args, **kwargs) torch_result_dtype = result.dtype for active_adapter in self.active_adapters: if active_adapter not in self.lora_A.keys(): continue lora_A = self.lora_A[active_adapter] lora_B = self.lora_B[active_adapter] dropout = self.lora_dropout[active_adapter] scaling = self.scaling[active_adapter] x = x.to(lora_A.weight.dtype) if not self.use_dora[active_adapter]: result = result + lora_B(lora_A(dropout(x))) * scaling else: x = dropout(x) result = result + self._apply_dora(x, lora_A, lora_B, scaling, active_adapter) result = result.to(torch_result_dtype) return result def __repr__(self) -> str: rep = super().__repr__() return "lora." 
+ rep class Embedding(nn.Module, LoraLayer): # LoRA implemented in a Embedding layer def __init__( self, base_layer: nn.Module, adapter_name: str, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, init_lora_weights: Union[bool, str] = True, use_rslora: bool = False, use_dora: bool = False, **kwargs, ) -> None: super().__init__() LoraLayer.__init__(self, base_layer) if use_dora: raise ValueError(f"{self.__class__.__name__} does not support DoRA yet, please set it to False") self._active_adapter = adapter_name self.update_layer( adapter_name, r, lora_alpha=lora_alpha, lora_dropout=lora_dropout, init_lora_weights=init_lora_weights, use_rslora=use_rslora, use_dora=use_dora, ) def update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora, use_dora): if r <= 0: raise ValueError(f"`r` should be a positive integer value but the value passed is {r}") self.r[adapter_name] = r self.lora_alpha[adapter_name] = lora_alpha if lora_dropout > 0.0: lora_dropout_layer = nn.Dropout(p=lora_dropout) else: lora_dropout_layer = nn.Identity() self.lora_dropout[adapter_name] = lora_dropout_layer # Actual trainable parameters weight_A = torch.randn((r, self.in_features)) weight_B = torch.randn((self.out_features, r)) self.lora_embedding_A[adapter_name] = nn.Parameter(weight_A) self.lora_embedding_B[adapter_name] = nn.Parameter(weight_B) if use_rslora: self.scaling[adapter_name] = lora_alpha / math.sqrt(r) else: self.scaling[adapter_name] = lora_alpha / r if init_lora_weights == "loftq": self.loftq_init(adapter_name) elif init_lora_weights: self.reset_lora_parameters(adapter_name, init_lora_weights) base_layer = self.get_base_layer() weight = getattr(base_layer, "weight", None) if weight is not None: # the layer is already completely initialized, this is an update self.to(base_layer.weight.device, dtype=weight.dtype) self.set_adapter(self.active_adapters) def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: """ Merge the active adapter weights into the base weights Args: safe_merge (`bool`, *optional*): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`list[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ adapter_names = check_adapters_to_merge(self, adapter_names) if not adapter_names: # no adapter to merge return for active_adapter in adapter_names: if active_adapter in self.lora_embedding_A.keys(): base_layer = self.get_base_layer() if safe_merge: # Note that safe_merge will be slower than the normal merge # because of the copy operation. orig_weights = base_layer.weight.data.clone() orig_weights = orig_weights + self.get_delta_weight(active_adapter) if not torch.isfinite(orig_weights).all(): raise ValueError( f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" ) base_layer.weight.data = orig_weights else: base_layer.weight.data = base_layer.weight.data + self.get_delta_weight(active_adapter) self.merged_adapters.append(active_adapter) def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. """ if not self.merged: warnings.warn("Already unmerged. 
Nothing to do.") return while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter in self.lora_embedding_A.keys(): self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter) def get_delta_weight(self, adapter) -> torch.Tensor: """ Compute the delta weight for the given adapter. Args: adapter (str): The name of the adapter for which the delta weight should be computed. """ device = self.lora_embedding_B[adapter].device dtype = self.lora_embedding_A[adapter].dtype # In case users wants to merge the adapter weights that are in # float16 while being on CPU, we need to cast the weights to float32, perform the merge and then cast back to # float16 because the `@` and matmul operation in general is not supported in torch + cpu + fp16. cast_to_fp32 = device.type == "cpu" and dtype == torch.float16 weight_A = self.lora_embedding_A[adapter] weight_B = self.lora_embedding_B[adapter] if cast_to_fp32: weight_A = weight_A.float() weight_B = weight_B.float() output_tensor = transpose(weight_B @ weight_A, True) * self.scaling[adapter] if cast_to_fp32: output_tensor = output_tensor.to(dtype=dtype) # cast back the weights self.lora_embedding_A[adapter] = weight_A.to(dtype) self.lora_embedding_B[adapter] = weight_B.to(dtype) return output_tensor def _mixed_batch_forward( self, x: torch.Tensor, *args: Any, adapter_names: list[str], **kwargs: Any ) -> torch.Tensor: # This is a special method that handles the case when users pass the argument `adapter_names`. This is an # extra argument that allows mixing different adapters in the same batch at inference time. result = self.base_layer(x, *args, **kwargs) unique_adapters = set(adapter_names) sub_batch_indices_list = [] for adapter in unique_adapters: sub_batch_indices_list.append([index for index, item in enumerate(adapter_names) if item == adapter]) for i, active_adapter in enumerate(unique_adapters): if active_adapter == "__base__": continue if active_adapter not in self.lora_embedding_A.keys(): continue embedding_A = self.lora_embedding_A[active_adapter].T embedding_B = self.lora_embedding_B[active_adapter].T scaling = self.scaling[active_adapter] # getting the sub-batch, passing it to LoRA layers and updating the corresponding indices of the linear # layer output sub_batch = x[sub_batch_indices_list[i]] after_A = self._embed(sub_batch, embedding_A) result[sub_batch_indices_list[i]] += (after_A @ embedding_B) * scaling return result def _embed(self, input: torch.Tensor, weight: torch.Tensor) -> torch.Tensor: base_layer = self.get_base_layer() return F.embedding( input, weight, padding_idx=base_layer.padding_idx, max_norm=base_layer.max_norm, norm_type=base_layer.norm_type, scale_grad_by_freq=base_layer.scale_grad_by_freq, sparse=base_layer.sparse, ) def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: # TODO: no dtype conversion here, unlike in Linear, is that correct? 
self._check_forward_args(x, *args, **kwargs) adapter_names = kwargs.pop("adapter_names", None) if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif adapter_names is not None: result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: result = self.base_layer(x, *args, **kwargs) torch_result_dtype = result.dtype for active_adapter in self.active_adapters: if active_adapter not in self.lora_embedding_A: continue embedding_A = self.lora_embedding_A[active_adapter].T embedding_B = self.lora_embedding_B[active_adapter].T scaling = self.scaling[active_adapter] after_A = self._embed(x, embedding_A) result = result + (after_A @ embedding_B) * scaling result = result.to(torch_result_dtype) return result def __repr__(self) -> str: rep = super().__repr__() return "lora." + rep class Conv2d(nn.Module, LoraLayer): # Lora implemented in a conv2d layer def __init__( self, base_layer: nn.Module, adapter_name: str, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, init_lora_weights: Union[bool, str] = True, use_rslora: bool = False, use_dora: bool = False, **kwargs, ) -> None: super().__init__() LoraLayer.__init__(self, base_layer) self._active_adapter = adapter_name self.update_layer( adapter_name, r, lora_alpha=lora_alpha, lora_dropout=lora_dropout, init_lora_weights=init_lora_weights, use_rslora=use_rslora, use_dora=use_dora, ) def update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora, use_dora): if r <= 0: raise ValueError(f"`r` should be a positive integer value but the value passed is {r}") self.r[adapter_name] = r self.lora_alpha[adapter_name] = lora_alpha if lora_dropout > 0.0: lora_dropout_layer = nn.Dropout(p=lora_dropout) else: lora_dropout_layer = nn.Identity() self.lora_dropout[adapter_name] = lora_dropout_layer # Actual trainable parameters base_layer = self.get_base_layer() kernel_size = base_layer.kernel_size stride = base_layer.stride padding = base_layer.padding self.lora_A[adapter_name] = nn.Conv2d(self.in_features, r, kernel_size, stride, padding, bias=False) self.lora_B[adapter_name] = nn.Conv2d(r, self.out_features, (1, 1), (1, 1), bias=False) if use_rslora: self.scaling[adapter_name] = lora_alpha / math.sqrt(r) else: self.scaling[adapter_name] = lora_alpha / r if init_lora_weights == "loftq": self.loftq_init(adapter_name) elif init_lora_weights: self.reset_lora_parameters(adapter_name, init_lora_weights) weight = getattr(base_layer, "weight", None) if weight is not None: # the layer is already completely initialized, this is an update self.to(base_layer.weight.device, dtype=weight.dtype) if use_dora: self.dora_init(adapter_name) self.use_dora[adapter_name] = True else: self.use_dora[adapter_name] = False self.set_adapter(self.active_adapters) def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: """ Merge the active adapter weights inside the base weights Args: safe_merge (`bool`, *optional*): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`list[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. 
""" adapter_names = check_adapters_to_merge(self, adapter_names) if not adapter_names: # no adapter to merge return for active_adapter in adapter_names: if active_adapter in self.lora_A.keys(): base_layer = self.get_base_layer() if safe_merge: # Note that safe_merge will be slower than the normal merge # because of the copy operation. orig_weights = base_layer.weight.data.clone() delta_weight = self.get_delta_weight(active_adapter) if not self.use_dora[active_adapter]: orig_weights = orig_weights + delta_weight else: # handle dora # since delta_weight already includes scaling, set it to 1 here weight_norm = self._get_weight_norm(orig_weights, delta_weight, scaling=1).detach() # We need to cache weight_norm because it has to be based on the original weights. We # cannot calculate it on the fly based on the merged weights when unmerging because its a # different value self._cache_store(f"{active_adapter}-weight_norm", weight_norm) dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm orig_weights = dora_factor.view(-1, 1, 1, 1) * (orig_weights + delta_weight) if not torch.isfinite(orig_weights).all(): raise ValueError( f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" ) base_layer.weight.data = orig_weights else: delta_weight = self.get_delta_weight(active_adapter) if not self.use_dora[active_adapter]: base_layer.weight.data = base_layer.weight.data + delta_weight else: # handle dora # since delta_weight already includes scaling, set it to 1 here weight_norm = self._get_weight_norm(base_layer.weight, delta_weight, scaling=1).detach() # We need to cache weight_norm because it has to be based on the original weights. We # cannot calculate it on the fly based on the merged weights when unmerging because its a # different value self._cache_store(f"{active_adapter}-weight_norm", weight_norm) dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm new_weight = dora_factor.view(-1, 1, 1, 1) * (base_layer.weight.data + delta_weight) base_layer.weight.data = new_weight self.merged_adapters.append(active_adapter) def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. """ if not self.merged: warnings.warn("Already unmerged. Nothing to do.") return while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter in self.lora_A.keys(): weight = self.get_base_layer().weight delta_weight = self.get_delta_weight(active_adapter) if not self.use_dora[active_adapter]: weight.data -= delta_weight else: weight_norm = self._cache_pop(f"{active_adapter}-weight_norm") dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm weight_orig = weight.data / dora_factor.view(-1, 1, 1, 1) - delta_weight weight.data = weight_orig def get_delta_weight(self, adapter) -> torch.Tensor: """ Compute the delta weight for the given adapter. Args: adapter (str): The name of the adapter for which the delta weight should be computed. """ device = self.lora_B[adapter].weight.device dtype = self.lora_A[adapter].weight.dtype # In case users wants to merge the adapter weights that are in # float16 while being on CPU, we need to cast the weights to float32, perform the merge and then cast back to # float16 because the `@` and matmul operation in general is not supported in torch + cpu + fp16. 
cast_to_fp32 = device.type == "cpu" and dtype == torch.float16 weight_A = self.lora_A[adapter].weight weight_B = self.lora_B[adapter].weight if cast_to_fp32: weight_A = weight_A.float() weight_B = weight_B.float() # https://github.com/bmaltais/kohya_ss/blob/feb6728762a8f463d15ba936d189d4c3abfaa1ab/networks/lora.py#L117 if self.get_base_layer().weight.size()[2:4] == (1, 1): # conv2d 1x1 output_tensor = (weight_B.squeeze(3).squeeze(2) @ weight_A.squeeze(3).squeeze(2)).unsqueeze(2).unsqueeze( 3 ) * self.scaling[adapter] else: # conv2d 3x3 output_tensor = ( F.conv2d( weight_A.permute(1, 0, 2, 3), weight_B, ).permute(1, 0, 2, 3) * self.scaling[adapter] ) if cast_to_fp32: output_tensor = output_tensor.to(dtype=dtype) # cast back the weights self.lora_A[adapter].weight.data = weight_A.to(dtype) self.lora_B[adapter].weight.data = weight_B.to(dtype) return output_tensor def _get_weight_norm(self, weight, lora_weight, scaling) -> torch.Tensor: # calculate L2 norm of weight matrix, channel-wise weight = weight + scaling * lora_weight # the following is needed to have compatibility with the 4D weight tensors of Conv2D weight_norm = weight.norm(p=2, dim=(1, 2, 3), keepdim=True).transpose(1, 0) return weight_norm def _apply_dora(self, x, lora_A, lora_B, scaling, active_adapter): """ For DoRA, calculate the extra output from LoRA with DoRA applied. This should be added on top of the base layer output. """ base_layer = self.get_base_layer() weight = base_layer.weight lora_weight = torch.mm(lora_B.weight.flatten(start_dim=1), lora_A.weight.flatten(start_dim=1)) lora_weight = lora_weight.reshape(weight.shape) magnitude = self.lora_magnitude_vector[active_adapter] weight_norm = self._get_weight_norm(weight, lora_weight, scaling) # see section 4.3 of DoRA (https://arxiv.org/abs/2402.09353) # "[...] we suggest treating ||V +∆V ||_c in # Eq. (5) as a constant, thereby detaching it from the gradient # graph. This means that while ||V + ∆V ||_c dynamically # reflects the updates of ∆V , it won’t receive any gradient # during backpropagation" weight_norm = weight_norm.detach() mag_norm_scale = magnitude / weight_norm result_dora = (mag_norm_scale - 1) * ( F.conv2d( x, weight, bias=None, stride=base_layer.stride, padding=base_layer.padding, dilation=base_layer.dilation, groups=base_layer.groups, ) ) + mag_norm_scale * lora_B(lora_A(x)) * scaling return result_dora def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor: self._check_forward_args(x, *args, **kwargs) adapter_names = kwargs.pop("adapter_names", None) if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif adapter_names is not None: result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: result = self.base_layer(x, *args, **kwargs) torch_result_dtype = result.dtype for active_adapter in self.active_adapters: if active_adapter not in self.lora_A.keys(): continue lora_A = self.lora_A[active_adapter] lora_B = self.lora_B[active_adapter] dropout = self.lora_dropout[active_adapter] scaling = self.scaling[active_adapter] x = x.to(lora_A.weight.dtype) if not self.use_dora[active_adapter]: result = result + lora_B(lora_A(dropout(x))) * scaling else: x = dropout(x) result = result + self._apply_dora(x, lora_A, lora_B, scaling, active_adapter) result = result.to(torch_result_dtype) return result def __repr__(self) -> str: rep = super().__repr__() return "lora." 
+ rep def dispatch_default( target: torch.nn.Module, adapter_name: str, lora_config: LoraConfig, **kwargs, ) -> Optional[torch.nn.Module]: new_module = None if isinstance(target, BaseTunerLayer): target_base_layer = target.get_base_layer() else: target_base_layer = target if isinstance(target_base_layer, torch.nn.Embedding): embedding_kwargs = kwargs.copy() embedding_kwargs.pop("fan_in_fan_out", None) embedding_kwargs.update(lora_config.loftq_config) new_module = Embedding(target, adapter_name, **embedding_kwargs) elif isinstance(target_base_layer, torch.nn.Conv2d): kwargs.update(lora_config.loftq_config) new_module = Conv2d(target, adapter_name, **kwargs) elif isinstance(target_base_layer, torch.nn.Linear): if kwargs["fan_in_fan_out"]: warnings.warn( "fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. " "Setting fan_in_fan_out to False." ) kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = False kwargs.update(lora_config.loftq_config) new_module = Linear(target, adapter_name, **kwargs) elif isinstance(target_base_layer, Conv1D): if not kwargs["fan_in_fan_out"]: warnings.warn( "fan_in_fan_out is set to False but the target module is `Conv1D`. " "Setting fan_in_fan_out to True." ) kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = True kwargs.update(lora_config.loftq_config) new_module = Linear(target, adapter_name, is_target_conv_1d_layer=True, **kwargs) return new_module
peft/src/peft/tuners/lora/layer.py/0
{ "file_path": "peft/src/peft/tuners/lora/layer.py", "repo_id": "peft", "token_count": 22293 }
182
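The `LoraLayer` / `Linear` implementation above ultimately reduces to a residual update on top of the frozen base layer, scaled by `lora_alpha / r` (or `lora_alpha / sqrt(r)` with rsLoRA), with DoRA adding a learned per-output-channel magnitude divided by the detached norm of the merged weight. Below is a minimal, self-contained sketch of that math in plain PyTorch — not the `peft` classes themselves — with toy dimensions chosen purely for illustration.

```python
import math

import torch
import torch.nn as nn
import torch.nn.functional as F

# Toy dimensions (assumptions for illustration only).
in_features, out_features, r, lora_alpha = 16, 32, 4, 8
use_rslora = False

base = nn.Linear(in_features, out_features)      # frozen base layer
lora_A = nn.Linear(in_features, r, bias=False)   # A: kaiming-uniform init
lora_B = nn.Linear(r, out_features, bias=False)  # B: zeros, so the initial delta is 0
nn.init.kaiming_uniform_(lora_A.weight, a=math.sqrt(5))
nn.init.zeros_(lora_B.weight)

# scaling = lora_alpha / r, or lora_alpha / sqrt(r) when rsLoRA is enabled.
scaling = lora_alpha / math.sqrt(r) if use_rslora else lora_alpha / r

x = torch.randn(2, in_features)

# Plain LoRA forward: base output plus the scaled low-rank update.
result = base(x) + lora_B(lora_A(x)) * scaling

# DoRA: rescale by a learned per-output-channel magnitude divided by the
# (detached) column norm of the merged weight, then add on top of base(x).
lora_weight = lora_B.weight @ lora_A.weight                 # (out, in)
merged = base.weight + scaling * lora_weight
magnitude = nn.Parameter(torch.linalg.norm(merged, dim=1))  # init as in dora_init
weight_norm = torch.linalg.norm(merged, dim=1).detach()
mag_norm_scale = (magnitude / weight_norm).view(1, -1)      # (1, out)
result_dora = (
    base(x)
    + (mag_norm_scale - 1) * F.linear(x, base.weight)
    + mag_norm_scale * lora_B(lora_A(x)) * scaling
)
print(result.shape, result_dora.shape)  # torch.Size([2, 32]) torch.Size([2, 32])
```

The `merge` / `unmerge` methods in the file above apply the same `scaling * (B @ A)` delta (optionally DoRA-rescaled) directly to `base.weight`, which is why the forward pass can skip the adapter branch once an adapter is merged.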
# Copyright 2024-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from typing import List, Literal import torch def reshape_weight_task_tensors(task_tensors, weights): """ Reshapes `weights` to match the shape of `task_tensors` by unsqeezing in the remaining dimenions. Args: task_tensors (`torch.Tensor`): The tensors that will be used to reshape `weights`. weights (`torch.Tensor`): The tensor to be reshaped. Returns: `torch.Tensor`: The reshaped tensor. """ new_shape = weights.shape + (1,) * (task_tensors.dim() - weights.dim()) weights = weights.view(new_shape) return weights def magnitude_based_pruning(tensor: torch.Tensor, density: float) -> torch.Tensor: """ Prune the smallest values of the task tensors and retain the top-k values based on the specified fraction `density`. Args: tensor (`torch.Tensor`):The tensor to prune. density (`float`):The fraction of values to preserve. Should be in [0,1]. Returns: `torch.Tensor`: The tensor with the pruned weights. """ mask = torch.zeros_like(tensor).reshape(-1) k = int(density * tensor.numel()) top_k = torch.topk(tensor.abs().reshape(-1), k=k, largest=True) mask[top_k[1]] = 1 return tensor * mask.reshape(tensor.shape) def random_pruning(tensor: torch.Tensor, density: float, rescale: bool) -> torch.Tensor: """ Prune random values based on the specified fraction `density`. Args: tensor (`torch.Tensor`):The tensor to prune. density (`float`):The fraction of values to preserve. Should be in [0,1]. rescale (`bool`):Whether to rescale the result to preserve the expected value of the original tensor. Returns: `torch.Tensor`: The pruned tensor. """ mask = torch.bernoulli(torch.full_like(input=tensor, fill_value=density)) pruned_tensor = tensor * mask if rescale: torch.div(input=pruned_tensor, other=density) return pruned_tensor def prune( tensor: torch.Tensor, density: float, method: Literal["magnitude", "random"], rescale: bool = False ) -> torch.Tensor: """ Prune the values of task tensors based on the `method`. Args: tensor (`torch.Tensor`):The tensor to prune. density (`float`):The fraction of values to preserve. Should be in [0,1]. method (`str`):The method to use to prune. Should be one of ["magnitude", "random"]. rescale (`bool`):Whether to rescale the result to preserve the expected value of the original tensor. Returns: `torch.Tensor`: The pruned tensor. """ if density >= 1: warnings.warn(f"The density {density} is greater than or equal to 1, no pruning will be performed.") return tensor elif density < 0: raise ValueError(f"Density should be >= 0, got {density}") if method == "magnitude": return magnitude_based_pruning(tensor, density) elif method == "random": return random_pruning(tensor, density, rescale=rescale) else: raise ValueError(f"Unknown method {method}") def calculate_majority_sign_mask( tensor: torch.Tensor, method: Literal["total", "frequency"] = "total" ) -> torch.Tensor: """ Get the mask of the majority sign across the task tensors. Task tensors are stacked on dimension 0. 
Args: tensor (`torch.Tensor`):The tensor to get the mask from. method (`str`):The method to use to get the mask. Should be one of ["total", "frequency"]. Returns: `torch.Tensor`: The majority sign mask. """ sign = tensor.sign() if method == "total": sign_magnitude = tensor.sum(dim=0) elif method == "frequency": sign_magnitude = sign.sum(dim=0) else: raise RuntimeError(f'Unimplemented mask method "{method}"') majority_sign = torch.where(sign_magnitude >= 0, 1, -1) return sign == majority_sign def disjoint_merge(task_tensors: torch.Tensor, majority_sign_mask: torch.Tensor) -> torch.Tensor: """ Merge the task tensors using disjoint merge. Args: task_tensors (`torch.Tensor`):The task tensors to merge. majority_sign_mask (`torch.Tensor`):The mask of the majority sign across the task tensors. Returns: `torch.Tensor`: The merged tensor. """ mixed_task_tensors = (task_tensors * majority_sign_mask).sum(dim=0) num_params_preserved = majority_sign_mask.sum(dim=0) return mixed_task_tensors / torch.clamp(num_params_preserved, min=1.0) def task_arithmetic(task_tensors: List[torch.Tensor], weights: torch.Tensor) -> torch.Tensor: """ Merge the task tensors using `task arithmetic`. Args: task_tensors(`List[torch.Tensor]`):The task tensors to merge. weights (`torch.Tensor`):The weights of the task tensors. Returns: `torch.Tensor`: The merged tensor. """ task_tensors = torch.stack(task_tensors, dim=0) # weighted task tensors weights = reshape_weight_task_tensors(task_tensors, weights) weighted_task_tensors = task_tensors * weights mixed_task_tensors = weighted_task_tensors.sum(dim=0) return mixed_task_tensors def magnitude_prune(task_tensors: List[torch.Tensor], weights: torch.Tensor, density: float) -> torch.Tensor: """ Merge the task tensors using `task arithmetic`. Args: task_tensors(`List[torch.Tensor]`):The task tensors to merge. weights (`torch.Tensor`):The weights of the task tensors. density (`float`): The fraction of values to preserve. Should be in [0,1]. Returns: `torch.Tensor`: The merged tensor. """ # sparsify task_tensors = [prune(tensor, density, method="magnitude") for tensor in task_tensors] task_tensors = torch.stack(task_tensors, dim=0) # weighted task tensors weights = reshape_weight_task_tensors(task_tensors, weights) weighted_task_tensors = task_tensors * weights mixed_task_tensors = weighted_task_tensors.sum(dim=0) return mixed_task_tensors def ties( task_tensors: List[torch.Tensor], weights: torch.Tensor, density: float, majority_sign_method: Literal["total", "frequency"] = "total", ) -> torch.Tensor: """ Merge the task tensors using `ties`. Args: task_tensors(`List[torch.Tensor]`):The task tensors to merge. weights (`torch.Tensor`):The weights of the task tensors. density (`float`):The fraction of values to preserve. Should be in [0,1]. majority_sign_method (`str`): The method to use to get the majority sign mask. Should be one of ["total", "frequency"]. Returns: `torch.Tensor`: The merged tensor. 
""" # sparsify task_tensors = [prune(tensor, density, method="magnitude") for tensor in task_tensors] task_tensors = torch.stack(task_tensors, dim=0) # Elect Sign majority_sign_mask = calculate_majority_sign_mask(task_tensors, method=majority_sign_method) # weighted task tensors weights = reshape_weight_task_tensors(task_tensors, weights) weighted_task_tensors = task_tensors * weights # Disjoint Merge mixed_task_tensors = disjoint_merge(weighted_task_tensors, majority_sign_mask) return mixed_task_tensors def dare_linear(task_tensors: List[torch.Tensor], weights: torch.Tensor, density: float) -> torch.Tensor: """ Merge the task tensors using `dare linear`. Args: task_tensors(`List[torch.Tensor]`):The task tensors to merge. weights (`torch.Tensor`):The weights of the task tensors. density (`float`):The fraction of values to preserve. Should be in [0,1]. Returns: `torch.Tensor`: The merged tensor. """ # sparsify task_tensors = [prune(tensor, density, method="random", rescale=True) for tensor in task_tensors] task_tensors = torch.stack(task_tensors, dim=0) # weighted task tensors weights = reshape_weight_task_tensors(task_tensors, weights) weighted_task_tensors = task_tensors * weights mixed_task_tensors = weighted_task_tensors.sum(dim=0) return mixed_task_tensors def dare_ties( task_tensors: List[torch.Tensor], weights: torch.Tensor, density: float, majority_sign_method: Literal["total", "frequency"] = "total", ) -> torch.Tensor: """ Merge the task tensors using `dare ties`. Args: task_tensors(`List[torch.Tensor]`):The task tensors to merge. weights (`torch.Tensor`):The weights of the task tensors. density (`float`):The fraction of values to preserve. Should be in [0,1]. majority_sign_method (`str`): The method to use to get the majority sign mask. Should be one of ["total", "frequency"]. Returns: `torch.Tensor`: The merged tensor. """ # sparsify task_tensors = [prune(tensor, density, method="random", rescale=True) for tensor in task_tensors] task_tensors = torch.stack(task_tensors, dim=0) # Elect Sign majority_sign_mask = calculate_majority_sign_mask(task_tensors, method=majority_sign_method) # weighted task tensors weights = reshape_weight_task_tensors(task_tensors, weights) weighted_task_tensors = task_tensors * weights # Disjoint Merge mixed_task_tensors = disjoint_merge(weighted_task_tensors, majority_sign_mask) return mixed_task_tensors
peft/src/peft/utils/merge_utils.py/0
{ "file_path": "peft/src/peft/utils/merge_utils.py", "repo_id": "peft", "token_count": 3819 }
183
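The merging helpers above (`task_arithmetic`, `ties`, `dare_linear`, `dare_ties`, `magnitude_prune`) share a common calling pattern: a list of task tensors, a per-task weight tensor, and, where pruning is involved, a `density` in [0, 1]. A small usage sketch, assuming the module path shown in the metadata (`peft.utils.merge_utils`) is importable and using random tensors as stand-ins for real task vectors:

```python
import torch

from peft.utils.merge_utils import dare_ties, task_arithmetic, ties  # path assumed from the file above

# Three "task vectors" (e.g. weight deltas from three fine-tuned adapters).
task_tensors = [torch.randn(64, 64) for _ in range(3)]
weights = torch.tensor([1.0, 0.7, 0.3])  # per-task weights, broadcast over tensor dims

# Plain weighted sum of the task tensors.
merged_ta = task_arithmetic(task_tensors, weights)

# TIES: magnitude-prune to 20% density, elect a majority sign, then disjoint-merge.
merged_ties = ties(task_tensors, weights, density=0.2, majority_sign_method="total")

# DARE-TIES: random pruning with rescaling instead of magnitude pruning.
merged_dare = dare_ties(task_tensors, weights, density=0.2)

print(merged_ta.shape, merged_ties.shape, merged_dare.shape)
```

Note that `reshape_weight_task_tensors` is what lets the 1-D `weights` broadcast against the stacked task tensors, so the weight tensor only needs one entry per task.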
*This guideline is very much a work-in-progress.*

Contributions to `timm` for code, documentation, and tests are more than welcome!

There haven't been any formal guidelines to date, so please bear with me, and feel free to add to this guide.

# Coding style

Code linting and auto-format (black) are not currently in place but open to consideration. In the meantime, the style to follow is (mostly) aligned with Google's guide: https://google.github.io/styleguide/pyguide.html.

A few specific differences from Google style (or black):

1. Line length is 120 char. Going over is okay in some cases (e.g. I prefer not to break URLs across lines).
2. Hanging indents are always preferred; please avoid aligning arguments with closing brackets or braces.

Example, from Google guide, but this is a NO here:

```
# Aligned with opening delimiter.
foo = long_function_name(var_one, var_two,
                         var_three, var_four)
meal = (spam,
        beans)

# Aligned with opening delimiter in a dictionary.
foo = {
    'long_dictionary_key': value1 +
                           value2,
    ...
}
```

This is YES:

```
# 4-space hanging indent; nothing on first line,
# closing parenthesis on a new line.
foo = long_function_name(
    var_one, var_two, var_three,
    var_four
)
meal = (
    spam,
    beans,
)

# 4-space hanging indent in a dictionary.
foo = {
    'long_dictionary_key':
        long_dictionary_value,
    ...
}
```

When there is discrepancy in a given source file (there are many origins for various bits of code and not all have been updated to what I consider the current goal), please follow the style in a given file.

In general, if you add new code, formatting it with black using the following options should result in a style that is compatible with the rest of the code base:

```
black --skip-string-normalization --line-length 120 <path-to-file>
```

Avoid formatting code that is unrelated to your PR though. PRs with pure formatting / style fixes will be accepted, but only in isolation from functional changes; it is best to ask before starting such a change.

# Documentation

As with code style, docstring style is based on the Google guide: https://google.github.io/styleguide/pyguide.html

The goal for the code is to eventually move to have all major functions and `__init__` methods use PEP484 type annotations.

When type annotations are used for a function, as per the Google pyguide, they should **NOT** be duplicated in the docstrings; please leave annotations as the one source of truth re typing.

There are a LOT of gaps in current documentation relative to the functionality in timm, please, document away!

# Installation

Create a Python virtual environment using Python 3.10. Inside the environment, install `torch` and `torchvision` using the instructions matching your system as listed on the [PyTorch website](https://pytorch.org/).

Then install the remaining dependencies:

```
python -m pip install -r requirements.txt
python -m pip install -r requirements-dev.txt  # for testing
python -m pip install -e .
```

## Unit tests

Run the tests using:

```
pytest tests/
```

Since the whole test suite takes a lot of time to run locally (a few hours), you may want to select a subset of tests relating to the changes you made by using the `-k` option of [`pytest`](https://docs.pytest.org/en/7.1.x/example/markers.html#using-k-expr-to-select-tests-based-on-their-name).

Moreover, running tests in parallel (in this example 4 processes) with the `-n` option may help:

```
pytest -k "substring-to-match" -n 4 tests/
```

## Building documentation

Please refer to [this document](https://github.com/huggingface/pytorch-image-models/tree/main/hfdocs).

# Questions

If you have any questions about contribution, where / how to contribute, please ask in the [Discussions](https://github.com/huggingface/pytorch-image-models/discussions/categories/contributing) (there is a `Contributing` topic).
pytorch-image-models/CONTRIBUTING.md/0
{ "file_path": "pytorch-image-models/CONTRIBUTING.md", "repo_id": "pytorch-image-models", "token_count": 1224 }
184
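The documentation section of the contributing guide above asks for PEP 484 type annotations in the signature and Google-style docstrings that do not repeat the types. A small, hypothetical example written in that style (the function name and behaviour are made up for illustration; only the conventions are the point), followed by a call site using the preferred 4-space hanging indent:

```python
import torch


def rescale_logits(logits: torch.Tensor, temperature: float = 1.0, inplace: bool = False) -> torch.Tensor:
    """Rescale classifier logits by a softmax temperature.

    Types live in the signature (PEP 484), so they are not repeated in the
    docstring, as the guideline above requests.

    Args:
        logits: Model outputs of shape (batch, num_classes).
        temperature: Softmax temperature; values > 1 flatten the distribution.
        inplace: If True, modify `logits` in place.

    Returns:
        The rescaled logits.
    """
    if temperature <= 0:
        raise ValueError(f'temperature must be positive, got {temperature}')
    return logits.div_(temperature) if inplace else logits / temperature


# Call site using the preferred 4-space hanging indent.
out = rescale_logits(
    torch.randn(2, 10),
    temperature=2.0,
)
print(out.shape)
```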
# Model Summaries The model architectures included come from a wide variety of sources. Sources, including papers, original impl ("reference code") that I rewrote / adapted, and PyTorch impl that I leveraged directly ("code") are listed below. Most included models have pretrained weights. The weights are either: 1. from their original sources 2. ported by myself from their original impl in a different framework (e.g. Tensorflow models) 3. trained from scratch using the included training script The validation results for the pretrained weights are [here](results.md) A more exciting view (with pretty pictures) of the models within `timm` can be found at [paperswithcode](https://paperswithcode.com/lib/timm). ## Big Transfer ResNetV2 (BiT) [[resnetv2.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/resnetv2.py)] * Paper: `Big Transfer (BiT): General Visual Representation Learning` - https://arxiv.org/abs/1912.11370 * Reference code: https://github.com/google-research/big_transfer ## Cross-Stage Partial Networks [[cspnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/cspnet.py)] * Paper: `CSPNet: A New Backbone that can Enhance Learning Capability of CNN` - https://arxiv.org/abs/1911.11929 * Reference impl: https://github.com/WongKinYiu/CrossStagePartialNetworks ## DenseNet [[densenet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/densenet.py)] * Paper: `Densely Connected Convolutional Networks` - https://arxiv.org/abs/1608.06993 * Code: https://github.com/pytorch/vision/tree/master/torchvision/models ## DLA [[dla.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/dla.py)] * Paper: https://arxiv.org/abs/1707.06484 * Code: https://github.com/ucbdrive/dla ## Dual-Path Networks [[dpn.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/dpn.py)] * Paper: `Dual Path Networks` - https://arxiv.org/abs/1707.01629 * My PyTorch code: https://github.com/rwightman/pytorch-dpn-pretrained * Reference code: https://github.com/cypw/DPNs ## GPU-Efficient Networks [[byobnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/byobnet.py)] * Paper: `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090 * Reference code: https://github.com/idstcv/GPU-Efficient-Networks ## HRNet [[hrnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/hrnet.py)] * Paper: `Deep High-Resolution Representation Learning for Visual Recognition` - https://arxiv.org/abs/1908.07919 * Code: https://github.com/HRNet/HRNet-Image-Classification ## Inception-V3 [[inception_v3.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/inception_v3.py)] * Paper: `Rethinking the Inception Architecture for Computer Vision` - https://arxiv.org/abs/1512.00567 * Code: https://github.com/pytorch/vision/tree/master/torchvision/models ## Inception-V4 [[inception_v4.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/inception_v4.py)] * Paper: `Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning` - https://arxiv.org/abs/1602.07261 * Code: https://github.com/Cadene/pretrained-models.pytorch * Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets ## Inception-ResNet-V2 [[inception_resnet_v2.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/inception_resnet_v2.py)] * Paper: `Inception-v4, 
Inception-ResNet and the Impact of Residual Connections on Learning` - https://arxiv.org/abs/1602.07261 * Code: https://github.com/Cadene/pretrained-models.pytorch * Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets ## NASNet-A [[nasnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/nasnet.py)] * Papers: `Learning Transferable Architectures for Scalable Image Recognition` - https://arxiv.org/abs/1707.07012 * Code: https://github.com/Cadene/pretrained-models.pytorch * Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets/nasnet ## PNasNet-5 [[pnasnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/pnasnet.py)] * Papers: `Progressive Neural Architecture Search` - https://arxiv.org/abs/1712.00559 * Code: https://github.com/Cadene/pretrained-models.pytorch * Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets/nasnet ## EfficientNet [[efficientnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/efficientnet.py)] * Papers: * EfficientNet NoisyStudent (B0-B7, L2) - https://arxiv.org/abs/1911.04252 * EfficientNet AdvProp (B0-B8) - https://arxiv.org/abs/1911.09665 * EfficientNet (B0-B7) - https://arxiv.org/abs/1905.11946 * EfficientNet-EdgeTPU (S, M, L) - https://ai.googleblog.com/2019/08/efficientnet-edgetpu-creating.html * MixNet - https://arxiv.org/abs/1907.09595 * MNASNet B1, A1 (Squeeze-Excite), and Small - https://arxiv.org/abs/1807.11626 * MobileNet-V2 - https://arxiv.org/abs/1801.04381 * FBNet-C - https://arxiv.org/abs/1812.03443 * Single-Path NAS - https://arxiv.org/abs/1904.02877 * My PyTorch code: https://github.com/rwightman/gen-efficientnet-pytorch * Reference code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet ## MobileNet-V3 [[mobilenetv3.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/mobilenetv3.py)] * Paper: `Searching for MobileNetV3` - https://arxiv.org/abs/1905.02244 * Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets/mobilenet ## RegNet [[regnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/regnet.py)] * Paper: `Designing Network Design Spaces` - https://arxiv.org/abs/2003.13678 * Reference code: https://github.com/facebookresearch/pycls/blob/master/pycls/models/regnet.py ## RepVGG [[byobnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/byobnet.py)] * Paper: `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 * Reference code: https://github.com/DingXiaoH/RepVGG ## ResNet, ResNeXt [[resnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/resnet.py)] * ResNet (V1B) * Paper: `Deep Residual Learning for Image Recognition` - https://arxiv.org/abs/1512.03385 * Code: https://github.com/pytorch/vision/tree/master/torchvision/models * ResNeXt * Paper: `Aggregated Residual Transformations for Deep Neural Networks` - https://arxiv.org/abs/1611.05431 * Code: https://github.com/pytorch/vision/tree/master/torchvision/models * 'Bag of Tricks' / Gluon C, D, E, S ResNet variants * Paper: `Bag of Tricks for Image Classification with CNNs` - https://arxiv.org/abs/1812.01187 * Code: https://github.com/dmlc/gluon-cv/blob/master/gluoncv/model_zoo/resnetv1b.py * Instagram pretrained / ImageNet tuned ResNeXt101 * Paper: `Exploring the Limits of Weakly Supervised Pretraining` - https://arxiv.org/abs/1805.00932 * 
Weights: https://pytorch.org/hub/facebookresearch_WSL-Images_resnext (NOTE: CC BY-NC 4.0 License, NOT commercial friendly) * Semi-supervised (SSL) / Semi-weakly Supervised (SWSL) ResNet and ResNeXts * Paper: `Billion-scale semi-supervised learning for image classification` - https://arxiv.org/abs/1905.00546 * Weights: https://github.com/facebookresearch/semi-supervised-ImageNet1K-models (NOTE: CC BY-NC 4.0 License, NOT commercial friendly) * Squeeze-and-Excitation Networks * Paper: `Squeeze-and-Excitation Networks` - https://arxiv.org/abs/1709.01507 * Code: Added to ResNet base, this is current version going forward, old `senet.py` is being deprecated * ECAResNet (ECA-Net) * Paper: `ECA-Net: Efficient Channel Attention for Deep CNN` - https://arxiv.org/abs/1910.03151v4 * Code: Added to ResNet base, ECA module contributed by @VRandme, reference https://github.com/BangguWu/ECANet ## Res2Net [[res2net.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/res2net.py)] * Paper: `Res2Net: A New Multi-scale Backbone Architecture` - https://arxiv.org/abs/1904.01169 * Code: https://github.com/gasvn/Res2Net ## ResNeSt [[resnest.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/resnest.py)] * Paper: `ResNeSt: Split-Attention Networks` - https://arxiv.org/abs/2004.08955 * Code: https://github.com/zhanghang1989/ResNeSt ## ReXNet [[rexnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/rexnet.py)] * Paper: `ReXNet: Diminishing Representational Bottleneck on CNN` - https://arxiv.org/abs/2007.00992 * Code: https://github.com/clovaai/rexnet ## Selective-Kernel Networks [[sknet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/sknet.py)] * Paper: `Selective-Kernel Networks` - https://arxiv.org/abs/1903.06586 * Code: https://github.com/implus/SKNet, https://github.com/clovaai/assembled-cnn ## SelecSLS [[selecsls.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/selecsls.py)] * Paper: `XNect: Real-time Multi-Person 3D Motion Capture with a Single RGB Camera` - https://arxiv.org/abs/1907.00837 * Code: https://github.com/mehtadushy/SelecSLS-Pytorch ## Squeeze-and-Excitation Networks [[senet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/senet.py)] NOTE: I am deprecating this version of the networks, the new ones are part of `resnet.py` * Paper: `Squeeze-and-Excitation Networks` - https://arxiv.org/abs/1709.01507 * Code: https://github.com/Cadene/pretrained-models.pytorch ## TResNet [[tresnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/tresnet.py)] * Paper: `TResNet: High Performance GPU-Dedicated Architecture` - https://arxiv.org/abs/2003.13630 * Code: https://github.com/mrT23/TResNet ## VGG [[vgg.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vgg.py)] * Paper: `Very Deep Convolutional Networks For Large-Scale Image Recognition` - https://arxiv.org/pdf/1409.1556.pdf * Reference code: https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py ## Vision Transformer [[vision_transformer.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py)] * Paper: `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` - https://arxiv.org/abs/2010.11929 * Reference code and pretrained weights: https://github.com/google-research/vision_transformer ## VovNet V2 and V1 
[[vovnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vovnet.py)] * Paper: `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667 * Reference code: https://github.com/youngwanLEE/vovnet-detectron2 ## Xception [[xception.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/xception.py)] * Paper: `Xception: Deep Learning with Depthwise Separable Convolutions` - https://arxiv.org/abs/1610.02357 * Code: https://github.com/Cadene/pretrained-models.pytorch ## Xception (Modified Aligned, Gluon) [[gluon_xception.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/gluon_xception.py)] * Paper: `Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation` - https://arxiv.org/abs/1802.02611 * Reference code: https://github.com/dmlc/gluon-cv/tree/master/gluoncv/model_zoo, https://github.com/jfzhang95/pytorch-deeplab-xception/ ## Xception (Modified Aligned, TF) [[aligned_xception.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/aligned_xception.py)] * Paper: `Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation` - https://arxiv.org/abs/1802.02611 * Reference code: https://github.com/tensorflow/models/tree/master/research/deeplab
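Most of the architectures cataloged above are exposed through `timm`'s factory and registry helpers. As a rough, hedged sketch (the model names below are just examples of registry entries, not recommendations):

```py
>>> import timm
>>> timm.list_models('*efficientnet*')[:5]   # discover registered variants by wildcard
>>> model = timm.create_model('resnet50', pretrained=False)
>>> model.default_cfg                        # input size, interpolation, crop pct, etc.
```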
pytorch-image-models/docs/models.md/0
{ "file_path": "pytorch-image-models/docs/models.md", "repo_id": "pytorch-image-models", "token_count": 4347 }
185
# # Ensemble Adversarial Inception ResNet v2 **Inception-ResNet-v2** is a convolutional neural architecture that builds on the Inception family of architectures but incorporates [residual connections](https://paperswithcode.com/method/residual-connection) (replacing the filter concatenation stage of the Inception architecture). This particular model was trained for study of adversarial examples (adversarial training). The weights from this model were ported from [Tensorflow/Models](https://github.com/tensorflow/models). {% include 'code_snippets.md' %} ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. ## Citation ```BibTeX @article{DBLP:journals/corr/abs-1804-00097, author = {Alexey Kurakin and Ian J. Goodfellow and Samy Bengio and Yinpeng Dong and Fangzhou Liao and Ming Liang and Tianyu Pang and Jun Zhu and Xiaolin Hu and Cihang Xie and Jianyu Wang and Zhishuai Zhang and Zhou Ren and Alan L. Yuille and Sangxia Huang and Yao Zhao and Yuzhe Zhao and Zhonglin Han and Junjiajia Long and Yerkebulan Berdibekov and Takuya Akiba and Seiya Tokui and Motoki Abe}, title = {Adversarial Attacks and Defences Competition}, journal = {CoRR}, volume = {abs/1804.00097}, year = {2018}, url = {http://arxiv.org/abs/1804.00097}, archivePrefix = {arXiv}, eprint = {1804.00097}, timestamp = {Thu, 31 Oct 2019 16:31:22 +0100}, biburl = {https://dblp.org/rec/journals/corr/abs-1804-00097.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ``` <!-- Type: model-index Collections: - Name: Ensemble Adversarial Paper: Title: Adversarial Attacks and Defences Competition URL: https://paperswithcode.com/paper/adversarial-attacks-and-defences-competition Models: - Name: ens_adv_inception_resnet_v2 In Collection: Ensemble Adversarial Metadata: FLOPs: 16959133120 Parameters: 55850000 File Size: 223774238 Architecture: - 1x1 Convolution - Auxiliary Classifier - Average Pooling - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inception-v3 Module - Max Pooling - ReLU - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: ens_adv_inception_resnet_v2 Crop Pct: '0.897' Image Size: '299' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/inception_resnet_v2.py#L351 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ens_adv_inception_resnet_v2-2592a550.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 1.0% Top 5 Accuracy: 17.32% -->
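The `{% include 'code_snippets.md' %}` directive above is what normally injects the usage snippets for this page; as a minimal, hedged sketch, the `ens_adv_inception_resnet_v2` ID from the model index can be passed directly to `timm.create_model`:

```py
>>> import timm
>>> model = timm.create_model('ens_adv_inception_resnet_v2', pretrained=True)
>>> model = model.eval()  # expects 299x299 bicubic inputs per the config above
```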
pytorch-image-models/docs/models/.templates/models/ensemble-adversarial.md/0
{ "file_path": "pytorch-image-models/docs/models/.templates/models/ensemble-adversarial.md", "repo_id": "pytorch-image-models", "token_count": 1379 }
186
# (Legacy) SENet A **SENet** is a convolutional neural network architecture that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration. The weights from this model were ported from Gluon. {% include 'code_snippets.md' %} ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. ## Citation ```BibTeX @misc{hu2019squeezeandexcitation, title={Squeeze-and-Excitation Networks}, author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu}, year={2019}, eprint={1709.01507}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: Legacy SENet Paper: Title: Squeeze-and-Excitation Networks URL: https://paperswithcode.com/paper/squeeze-and-excitation-networks Models: - Name: legacy_senet154 In Collection: Legacy SENet Metadata: FLOPs: 26659556016 Parameters: 115090000 File Size: 461488402 Architecture: - Convolution - Dense Connections - Global Average Pooling - Max Pooling - Softmax - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - Label Smoothing - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA Titan X GPUs ID: legacy_senet154 LR: 0.6 Epochs: 100 Layers: 154 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L440 Weights: http://data.lip6.fr/cadene/pretrainedmodels/senet154-c7b49a05.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 81.33% Top 5 Accuracy: 95.51% -->
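As with the other template pages, the usage snippets come from the shared include; a minimal, hedged sketch using the `legacy_senet154` ID listed above:

```py
>>> import timm
>>> model = timm.create_model('legacy_senet154', pretrained=True)
>>> model = model.eval()  # expects 224x224 bilinear inputs per the config above
```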
pytorch-image-models/docs/models/.templates/models/legacy-senet.md/0
{ "file_path": "pytorch-image-models/docs/models/.templates/models/legacy-senet.md", "repo_id": "pytorch-image-models", "token_count": 793 }
187
# RexNet **Rank Expansion Networks** (ReXNets) follow a set of new design principles for designing bottlenecks in image classification models. Authors refine each layer by 1) expanding the input channel size of the convolution layer and 2) replacing the [ReLU6s](https://www.paperswithcode.com/method/relu6). {% include 'code_snippets.md' %} ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. ## Citation ```BibTeX @misc{han2020rexnet, title={ReXNet: Diminishing Representational Bottleneck on Convolutional Neural Network}, author={Dongyoon Han and Sangdoo Yun and Byeongho Heo and YoungJoon Yoo}, year={2020}, eprint={2007.00992}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: RexNet Paper: Title: 'ReXNet: Diminishing Representational Bottleneck on Convolutional Neural Network' URL: https://paperswithcode.com/paper/rexnet-diminishing-representational Models: - Name: rexnet_100 In Collection: RexNet Metadata: FLOPs: 509989377 Parameters: 4800000 File Size: 19417552 Architecture: - Batch Normalization - Convolution - Dropout - ReLU6 - Residual Connection Tasks: - Image Classification Training Techniques: - Label Smoothing - Linear Warmup With Cosine Annealing - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet Training Resources: 4x NVIDIA V100 GPUs ID: rexnet_100 LR: 0.5 Epochs: 400 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 1.0e-05 Interpolation: bicubic Label Smoothing: 0.1 Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/rexnet.py#L212 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_100-1b4dddf4.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 77.86% Top 5 Accuracy: 93.88% - Name: rexnet_130 In Collection: RexNet Metadata: FLOPs: 848364461 Parameters: 7560000 File Size: 30508197 Architecture: - Batch Normalization - Convolution - Dropout - ReLU6 - Residual Connection Tasks: - Image Classification Training Techniques: - Label Smoothing - Linear Warmup With Cosine Annealing - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet Training Resources: 4x NVIDIA V100 GPUs ID: rexnet_130 LR: 0.5 Epochs: 400 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 1.0e-05 Interpolation: bicubic Label Smoothing: 0.1 Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/rexnet.py#L218 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_130-590d768e.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.49% Top 5 Accuracy: 94.67% - Name: rexnet_150 In Collection: RexNet Metadata: FLOPs: 1122374469 Parameters: 9730000 File Size: 39227315 Architecture: - Batch Normalization - Convolution - Dropout - ReLU6 - Residual Connection Tasks: - Image Classification Training Techniques: - Label Smoothing - Linear Warmup With Cosine Annealing - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet Training Resources: 4x NVIDIA V100 GPUs ID: rexnet_150 LR: 0.5 Epochs: 400 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 1.0e-05 Interpolation: bicubic Label Smoothing: 0.1 Code: 
https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/rexnet.py#L224 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_150-bd1a6aa8.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.31% Top 5 Accuracy: 95.16% - Name: rexnet_200 In Collection: RexNet Metadata: FLOPs: 1960224938 Parameters: 16370000 File Size: 65862221 Architecture: - Batch Normalization - Convolution - Dropout - ReLU6 - Residual Connection Tasks: - Image Classification Training Techniques: - Label Smoothing - Linear Warmup With Cosine Annealing - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet Training Resources: 4x NVIDIA V100 GPUs ID: rexnet_200 LR: 0.5 Epochs: 400 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 1.0e-05 Interpolation: bicubic Label Smoothing: 0.1 Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/rexnet.py#L230 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_200-8c0b7f2d.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 81.63% Top 5 Accuracy: 95.67% -->
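A minimal, hedged sketch using the `rexnet_100` ID from the model index above (the `num_classes` value is only a placeholder for fine-tuning):

```py
>>> import timm
>>> model = timm.create_model('rexnet_100', pretrained=True)
>>> # swap the classifier head when fine-tuning on a smaller label set
>>> model_ft = timm.create_model('rexnet_100', pretrained=True, num_classes=10)
```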
pytorch-image-models/docs/models/.templates/models/rexnet.md/0
{ "file_path": "pytorch-image-models/docs/models/.templates/models/rexnet.md", "repo_id": "pytorch-image-models", "token_count": 2278 }
188
# (Tensorflow) MobileNet v3 **MobileNetV3** is a convolutional neural network that is designed for mobile phone CPUs. The network design includes the use of a [hard swish activation](https://paperswithcode.com/method/hard-swish) and [squeeze-and-excitation](https://paperswithcode.com/method/squeeze-and-excitation-block) modules in the [MBConv blocks](https://paperswithcode.com/method/inverted-residual-block). The weights from this model were ported from [Tensorflow/Models](https://github.com/tensorflow/models). {% include 'code_snippets.md' %} ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. ## Citation ```BibTeX @article{DBLP:journals/corr/abs-1905-02244, author = {Andrew Howard and Mark Sandler and Grace Chu and Liang{-}Chieh Chen and Bo Chen and Mingxing Tan and Weijun Wang and Yukun Zhu and Ruoming Pang and Vijay Vasudevan and Quoc V. Le and Hartwig Adam}, title = {Searching for MobileNetV3}, journal = {CoRR}, volume = {abs/1905.02244}, year = {2019}, url = {http://arxiv.org/abs/1905.02244}, archivePrefix = {arXiv}, eprint = {1905.02244}, timestamp = {Tue, 12 Jan 2021 15:30:06 +0100}, biburl = {https://dblp.org/rec/journals/corr/abs-1905-02244.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ``` <!-- Type: model-index Collections: - Name: TF MobileNet V3 Paper: Title: Searching for MobileNetV3 URL: https://paperswithcode.com/paper/searching-for-mobilenetv3 Models: - Name: tf_mobilenetv3_large_075 In Collection: TF MobileNet V3 Metadata: FLOPs: 194323712 Parameters: 3990000 File Size: 16097377 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Depthwise Separable Convolution - Dropout - Global Average Pooling - Hard Swish - Inverted Residual Block - ReLU - Residual Connection - Softmax - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - RMSProp - Weight Decay Training Data: - ImageNet Training Resources: 4x4 TPU Pod ID: tf_mobilenetv3_large_075 LR: 0.1 Dropout: 0.8 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 4096 Image Size: '224' Weight Decay: 1.0e-05 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L394 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_075-150ee8b0.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 73.45% Top 5 Accuracy: 91.34% - Name: tf_mobilenetv3_large_100 In Collection: TF MobileNet V3 Metadata: FLOPs: 274535288 Parameters: 5480000 File Size: 22076649 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Depthwise Separable Convolution - Dropout - Global Average Pooling - Hard Swish - Inverted Residual Block - ReLU - Residual Connection - Softmax - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - RMSProp - Weight Decay Training Data: - ImageNet Training Resources: 4x4 TPU Pod ID: tf_mobilenetv3_large_100 LR: 0.1 Dropout: 0.8 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 4096 Image Size: '224' Weight Decay: 1.0e-05 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L403 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_100-427764d5.pth 
Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 75.51% Top 5 Accuracy: 92.61% - Name: tf_mobilenetv3_large_minimal_100 In Collection: TF MobileNet V3 Metadata: FLOPs: 267216928 Parameters: 3920000 File Size: 15836368 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Depthwise Separable Convolution - Dropout - Global Average Pooling - Hard Swish - Inverted Residual Block - ReLU - Residual Connection - Softmax - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - RMSProp - Weight Decay Training Data: - ImageNet Training Resources: 4x4 TPU Pod ID: tf_mobilenetv3_large_minimal_100 LR: 0.1 Dropout: 0.8 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 4096 Image Size: '224' Weight Decay: 1.0e-05 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L412 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_minimal_100-8596ae28.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 72.24% Top 5 Accuracy: 90.64% - Name: tf_mobilenetv3_small_075 In Collection: TF MobileNet V3 Metadata: FLOPs: 48457664 Parameters: 2040000 File Size: 8242701 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Depthwise Separable Convolution - Dropout - Global Average Pooling - Hard Swish - Inverted Residual Block - ReLU - Residual Connection - Softmax - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - RMSProp - Weight Decay Training Data: - ImageNet Training Resources: 16x GPUs ID: tf_mobilenetv3_small_075 LR: 0.045 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 4096 Image Size: '224' Weight Decay: 4.0e-05 Interpolation: bilinear RMSProp Decay: 0.9 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L421 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_075-da427f52.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 65.72% Top 5 Accuracy: 86.13% - Name: tf_mobilenetv3_small_100 In Collection: TF MobileNet V3 Metadata: FLOPs: 65450600 Parameters: 2540000 File Size: 10256398 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Depthwise Separable Convolution - Dropout - Global Average Pooling - Hard Swish - Inverted Residual Block - ReLU - Residual Connection - Softmax - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - RMSProp - Weight Decay Training Data: - ImageNet Training Resources: 16x GPUs ID: tf_mobilenetv3_small_100 LR: 0.045 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 4096 Image Size: '224' Weight Decay: 4.0e-05 Interpolation: bilinear RMSProp Decay: 0.9 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L430 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_100-37f49e2b.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 67.92% Top 5 Accuracy: 87.68% - Name: tf_mobilenetv3_small_minimal_100 In Collection: TF MobileNet V3 Metadata: FLOPs: 60827936 Parameters: 2040000 File Size: 8258083 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - 
Dense Connections - Depthwise Separable Convolution - Dropout - Global Average Pooling - Hard Swish - Inverted Residual Block - ReLU - Residual Connection - Softmax - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - RMSProp - Weight Decay Training Data: - ImageNet Training Resources: 16x GPUs ID: tf_mobilenetv3_small_minimal_100 LR: 0.045 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 4096 Image Size: '224' Weight Decay: 4.0e-05 Interpolation: bilinear RMSProp Decay: 0.9 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L439 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_minimal_100-922a7843.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 62.91% Top 5 Accuracy: 84.24% -->
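A minimal, hedged sketch using the `tf_mobilenetv3_large_100` ID from the model index above together with the data-config helpers:

```py
>>> import timm
>>> from timm.data import resolve_data_config, create_transform
>>> model = timm.create_model('tf_mobilenetv3_large_100', pretrained=True).eval()
>>> transform = create_transform(**resolve_data_config({}, model=model))  # 224, bilinear, 0.875 crop
```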
pytorch-image-models/docs/models/.templates/models/tf-mobilenet-v3.md/0
{ "file_path": "pytorch-image-models/docs/models/.templates/models/tf-mobilenet-v3.md", "repo_id": "pytorch-image-models", "token_count": 3951 }
189
# Adversarial Inception v3 **Inception v3** is a convolutional neural network architecture from the Inception family that makes several improvements including using [Label Smoothing](https://paperswithcode.com/method/label-smoothing), Factorized 7 x 7 convolutions, and the use of an [auxiliary classifer](https://paperswithcode.com/method/auxiliary-classifier) to propagate label information lower down the network (along with the use of batch normalization for layers in the sidehead). The key building block is an [Inception Module](https://paperswithcode.com/method/inception-v3-module). This particular model was trained for study of adversarial examples (adversarial training). The weights from this model were ported from [Tensorflow/Models](https://github.com/tensorflow/models). ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('adv_inception_v3', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.no_grad(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `adv_inception_v3`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('adv_inception_v3', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../scripts) for training a new model afresh. ## Citation ```BibTeX @article{DBLP:journals/corr/abs-1804-00097, author = {Alexey Kurakin and Ian J. 
Goodfellow and Samy Bengio and Yinpeng Dong and Fangzhou Liao and Ming Liang and Tianyu Pang and Jun Zhu and Xiaolin Hu and Cihang Xie and Jianyu Wang and Zhishuai Zhang and Zhou Ren and Alan L. Yuille and Sangxia Huang and Yao Zhao and Yuzhe Zhao and Zhonglin Han and Junjiajia Long and Yerkebulan Berdibekov and Takuya Akiba and Seiya Tokui and Motoki Abe}, title = {Adversarial Attacks and Defences Competition}, journal = {CoRR}, volume = {abs/1804.00097}, year = {2018}, url = {http://arxiv.org/abs/1804.00097}, archivePrefix = {arXiv}, eprint = {1804.00097}, timestamp = {Thu, 31 Oct 2019 16:31:22 +0100}, biburl = {https://dblp.org/rec/journals/corr/abs-1804-00097.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ``` <!-- Type: model-index Collections: - Name: Adversarial Inception v3 Paper: Title: Adversarial Attacks and Defences Competition URL: https://paperswithcode.com/paper/adversarial-attacks-and-defences-competition Models: - Name: adv_inception_v3 In Collection: Adversarial Inception v3 Metadata: FLOPs: 7352418880 Parameters: 23830000 File Size: 95549439 Architecture: - 1x1 Convolution - Auxiliary Classifier - Average Pooling - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inception-v3 Module - Max Pooling - ReLU - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: adv_inception_v3 Crop Pct: '0.875' Image Size: '299' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/inception_v3.py#L456 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/adv_inception_v3-9e27bd63.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 77.58% Top 5 Accuracy: 93.74% -->
pytorch-image-models/hfdocs/source/models/adversarial-inception-v3.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/adversarial-inception-v3.mdx", "repo_id": "pytorch-image-models", "token_count": 2247 }
190
# (Gluon) ResNet **Residual Networks**, or **ResNets**, learn residual functions with reference to the layer inputs, instead of learning unreferenced functions. Instead of hoping each few stacked layers directly fit a desired underlying mapping, residual nets let these layers fit a residual mapping. They stack [residual blocks](https://paperswithcode.com/method/residual-block) ontop of each other to form network: e.g. a ResNet-50 has fifty layers using these blocks. The weights from this model were ported from [Gluon](https://cv.gluon.ai/model_zoo/classification.html). ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('gluon_resnet101_v1b', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.no_grad(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `gluon_resnet101_v1b`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('gluon_resnet101_v1b', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../scripts) for training a new model afresh. 
## Citation ```BibTeX @article{DBLP:journals/corr/HeZRS15, author = {Kaiming He and Xiangyu Zhang and Shaoqing Ren and Jian Sun}, title = {Deep Residual Learning for Image Recognition}, journal = {CoRR}, volume = {abs/1512.03385}, year = {2015}, url = {http://arxiv.org/abs/1512.03385}, archivePrefix = {arXiv}, eprint = {1512.03385}, timestamp = {Wed, 17 Apr 2019 17:23:45 +0200}, biburl = {https://dblp.org/rec/journals/corr/HeZRS15.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ``` <!-- Type: model-index Collections: - Name: Gloun ResNet Paper: Title: Deep Residual Learning for Image Recognition URL: https://paperswithcode.com/paper/deep-residual-learning-for-image-recognition Models: - Name: gluon_resnet101_v1b In Collection: Gloun ResNet Metadata: FLOPs: 10068547584 Parameters: 44550000 File Size: 178723172 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: gluon_resnet101_v1b Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L89 Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1b-3b017079.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.3% Top 5 Accuracy: 94.53% - Name: gluon_resnet101_v1c In Collection: Gloun ResNet Metadata: FLOPs: 10376567296 Parameters: 44570000 File Size: 178802575 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: gluon_resnet101_v1c Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L113 Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1c-1f26822a.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.53% Top 5 Accuracy: 94.59% - Name: gluon_resnet101_v1d In Collection: Gloun ResNet Metadata: FLOPs: 10377018880 Parameters: 44570000 File Size: 178802755 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: gluon_resnet101_v1d Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L138 Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1d-0f9c8644.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.4% Top 5 Accuracy: 95.02% - Name: gluon_resnet101_v1s In Collection: Gloun ResNet Metadata: FLOPs: 11805511680 Parameters: 44670000 File Size: 179221777 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification 
Training Data: - ImageNet ID: gluon_resnet101_v1s Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L166 Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1s-60fe0cc1.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.29% Top 5 Accuracy: 95.16% - Name: gluon_resnet152_v1b In Collection: Gloun ResNet Metadata: FLOPs: 14857660416 Parameters: 60190000 File Size: 241534001 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: gluon_resnet152_v1b Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L97 Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1b-c1edb0dd.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.69% Top 5 Accuracy: 94.73% - Name: gluon_resnet152_v1c In Collection: Gloun ResNet Metadata: FLOPs: 15165680128 Parameters: 60210000 File Size: 241613404 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: gluon_resnet152_v1c Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L121 Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1c-a3bb0b98.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.91% Top 5 Accuracy: 94.85% - Name: gluon_resnet152_v1d In Collection: Gloun ResNet Metadata: FLOPs: 15166131712 Parameters: 60210000 File Size: 241613584 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: gluon_resnet152_v1d Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L147 Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1d-bd354e12.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.48% Top 5 Accuracy: 95.2% - Name: gluon_resnet152_v1s In Collection: Gloun ResNet Metadata: FLOPs: 16594624512 Parameters: 60320000 File Size: 242032606 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: gluon_resnet152_v1s Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L175 
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1s-dcc41b81.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 81.02% Top 5 Accuracy: 95.42% - Name: gluon_resnet18_v1b In Collection: Gloun ResNet Metadata: FLOPs: 2337073152 Parameters: 11690000 File Size: 46816736 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: gluon_resnet18_v1b Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L65 Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet18_v1b-0757602b.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 70.84% Top 5 Accuracy: 89.76% - Name: gluon_resnet34_v1b In Collection: Gloun ResNet Metadata: FLOPs: 4718469120 Parameters: 21800000 File Size: 87295112 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: gluon_resnet34_v1b Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L73 Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet34_v1b-c6d82d59.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 74.59% Top 5 Accuracy: 92.0% - Name: gluon_resnet50_v1b In Collection: Gloun ResNet Metadata: FLOPs: 5282531328 Parameters: 25560000 File Size: 102493763 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: gluon_resnet50_v1b Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L81 Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1b-0ebe02e2.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 77.58% Top 5 Accuracy: 93.72% - Name: gluon_resnet50_v1c In Collection: Gloun ResNet Metadata: FLOPs: 5590551040 Parameters: 25580000 File Size: 102573166 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: gluon_resnet50_v1c Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L105 Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1c-48092f55.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.01% Top 5 Accuracy: 93.99% - Name: gluon_resnet50_v1d 
In Collection: Gloun ResNet Metadata: FLOPs: 5591002624 Parameters: 25580000 File Size: 102573346 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: gluon_resnet50_v1d Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L129 Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1d-818a1b1b.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.06% Top 5 Accuracy: 94.46% - Name: gluon_resnet50_v1s In Collection: Gloun ResNet Metadata: FLOPs: 7019495424 Parameters: 25680000 File Size: 102992368 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: gluon_resnet50_v1s Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L156 Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1s-1762acc0.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.7% Top 5 Accuracy: 94.25% -->
pytorch-image-models/hfdocs/source/models/gloun-resnet.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/gloun-resnet.mdx", "repo_id": "pytorch-image-models", "token_count": 7210 }
191
# SK-ResNet **SK ResNet** is a variant of a [ResNet](https://www.paperswithcode.com/method/resnet) that employs a [Selective Kernel](https://paperswithcode.com/method/selective-kernel) unit. In general, all the large kernel convolutions in the original bottleneck blocks in ResNet are replaced by the proposed [SK convolutions](https://paperswithcode.com/method/selective-kernel-convolution), enabling the network to choose appropriate receptive field sizes in an adaptive manner. ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('skresnet18', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.no_grad(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `skresnet18`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('skresnet18', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../scripts) for training a new model afresh. 
## Citation ```BibTeX @misc{li2019selective, title={Selective Kernel Networks}, author={Xiang Li and Wenhai Wang and Xiaolin Hu and Jian Yang}, year={2019}, eprint={1903.06586}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: SKResNet Paper: Title: Selective Kernel Networks URL: https://paperswithcode.com/paper/selective-kernel-networks Models: - Name: skresnet18 In Collection: SKResNet Metadata: FLOPs: 2333467136 Parameters: 11960000 File Size: 47923238 Architecture: - Convolution - Dense Connections - Global Average Pooling - Max Pooling - Residual Connection - Selective Kernel - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x GPUs ID: skresnet18 LR: 0.1 Epochs: 100 Layers: 18 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 4.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/sknet.py#L148 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnet18_ra-4eec2804.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 73.03% Top 5 Accuracy: 91.17% - Name: skresnet34 In Collection: SKResNet Metadata: FLOPs: 4711849952 Parameters: 22280000 File Size: 89299314 Architecture: - Convolution - Dense Connections - Global Average Pooling - Max Pooling - Residual Connection - Selective Kernel - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x GPUs ID: skresnet34 LR: 0.1 Epochs: 100 Layers: 34 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 4.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/sknet.py#L165 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnet34_ra-bdc0ccde.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 76.93% Top 5 Accuracy: 93.32% -->
pytorch-image-models/hfdocs/source/models/skresnet.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/skresnet.mdx", "repo_id": "pytorch-image-models", "token_count": 2082 }
192
# Data

[[autodoc]] timm.data.create_dataset

[[autodoc]] timm.data.create_loader

[[autodoc]] timm.data.create_transform

[[autodoc]] timm.data.resolve_data_config
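As a minimal, hedged sketch of how these factory functions fit together (the dataset name, root path, and batch size below are placeholders, not recommendations):

```py
>>> import timm
>>> from timm.data import create_dataset, create_loader, create_transform, resolve_data_config

>>> model = timm.create_model('resnet50', pretrained=False)
>>> config = resolve_data_config({}, model=model)   # infer input size, mean/std, crop pct
>>> transform = create_transform(**config)          # torchvision-style transform pipeline

>>> dataset = create_dataset('torch/cifar10', root='data/', split='train', download=True)
>>> loader = create_loader(dataset, input_size=config['input_size'], batch_size=32,
...                        is_training=True, use_prefetcher=False)
```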
pytorch-image-models/hfdocs/source/reference/data.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/reference/data.mdx", "repo_id": "pytorch-image-models", "token_count": 67 }
193
from abc import abstractmethod


class Reader:
    def __init__(self):
        pass

    @abstractmethod
    def _filename(self, index, basename=False, absolute=False):
        pass

    def filename(self, index, basename=False, absolute=False):
        return self._filename(index, basename=basename, absolute=absolute)

    def filenames(self, basename=False, absolute=False):
        return [self._filename(index, basename=basename, absolute=absolute) for index in range(len(self))]
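

# Hypothetical concrete reader (illustrative only, not part of timm): a subclass fills in
# `_filename` plus the `__getitem__`/`__len__` used by the dataset wrappers. The class name
# and flat-folder layout below are assumptions for the example.
import os


class _FlatFolderReader(Reader):
    def __init__(self, root):
        super().__init__()
        self.root = root
        self.samples = sorted(os.listdir(root))  # one image file per sample, no class subdirs

    def __getitem__(self, index):
        path = os.path.join(self.root, self.samples[index])
        return open(path, 'rb'), 0  # (file-like image, dummy target) pair

    def __len__(self):
        return len(self.samples)

    def _filename(self, index, basename=False, absolute=False):
        return self.samples[index] if basename else os.path.join(self.root, self.samples[index])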
pytorch-image-models/timm/data/readers/reader.py/0
{ "file_path": "pytorch-image-models/timm/data/readers/reader.py", "repo_id": "pytorch-image-models", "token_count": 171 }
194
""" Activations A collection of jit-scripted activations fn and modules with a common interface so that they can easily be swapped. All have an `inplace` arg even if not used. All jit scripted activations are lacking in-place variations on purpose, scripted kernel fusion does not currently work across in-place op boundaries, thus performance is equal to or less than the non-scripted versions if they contain in-place ops. Hacked together by / Copyright 2020 Ross Wightman """ import torch from torch import nn as nn from torch.nn import functional as F @torch.jit.script def swish_jit(x, inplace: bool = False): """Swish - Described in: https://arxiv.org/abs/1710.05941 """ return x.mul(x.sigmoid()) @torch.jit.script def mish_jit(x, _inplace: bool = False): """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681 """ return x.mul(F.softplus(x).tanh()) class SwishJit(nn.Module): def __init__(self, inplace: bool = False): super(SwishJit, self).__init__() def forward(self, x): return swish_jit(x) class MishJit(nn.Module): def __init__(self, inplace: bool = False): super(MishJit, self).__init__() def forward(self, x): return mish_jit(x) @torch.jit.script def hard_sigmoid_jit(x, inplace: bool = False): # return F.relu6(x + 3.) / 6. return (x + 3).clamp(min=0, max=6).div(6.) # clamp seems ever so slightly faster? class HardSigmoidJit(nn.Module): def __init__(self, inplace: bool = False): super(HardSigmoidJit, self).__init__() def forward(self, x): return hard_sigmoid_jit(x) @torch.jit.script def hard_swish_jit(x, inplace: bool = False): # return x * (F.relu6(x + 3.) / 6) return x * (x + 3).clamp(min=0, max=6).div(6.) # clamp seems ever so slightly faster? class HardSwishJit(nn.Module): def __init__(self, inplace: bool = False): super(HardSwishJit, self).__init__() def forward(self, x): return hard_swish_jit(x) @torch.jit.script def hard_mish_jit(x, inplace: bool = False): """ Hard Mish Experimental, based on notes by Mish author Diganta Misra at https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md """ return 0.5 * x * (x + 2).clamp(min=0, max=2) class HardMishJit(nn.Module): def __init__(self, inplace: bool = False): super(HardMishJit, self).__init__() def forward(self, x): return hard_mish_jit(x)
pytorch-image-models/timm/layers/activations_jit.py/0
{ "file_path": "pytorch-image-models/timm/layers/activations_jit.py", "repo_id": "pytorch-image-models", "token_count": 1008 }
195
""" Norm Layer Factory Create norm modules by string (to mirror create_act and creat_norm-act fns) Copyright 2022 Ross Wightman """ import functools import types from typing import Type import torch.nn as nn from .norm import GroupNorm, GroupNorm1, LayerNorm, LayerNorm2d, RmsNorm from torchvision.ops.misc import FrozenBatchNorm2d _NORM_MAP = dict( batchnorm=nn.BatchNorm2d, batchnorm2d=nn.BatchNorm2d, batchnorm1d=nn.BatchNorm1d, groupnorm=GroupNorm, groupnorm1=GroupNorm1, layernorm=LayerNorm, layernorm2d=LayerNorm2d, rmsnorm=RmsNorm, frozenbatchnorm2d=FrozenBatchNorm2d, ) _NORM_TYPES = {m for n, m in _NORM_MAP.items()} def create_norm_layer(layer_name, num_features, **kwargs): layer = get_norm_layer(layer_name) layer_instance = layer(num_features, **kwargs) return layer_instance def get_norm_layer(norm_layer): if norm_layer is None: return None assert isinstance(norm_layer, (type, str, types.FunctionType, functools.partial)) norm_kwargs = {} # unbind partial fn, so args can be rebound later if isinstance(norm_layer, functools.partial): norm_kwargs.update(norm_layer.keywords) norm_layer = norm_layer.func if isinstance(norm_layer, str): if not norm_layer: return None layer_name = norm_layer.replace('_', '') norm_layer = _NORM_MAP[layer_name] else: norm_layer = norm_layer if norm_kwargs: norm_layer = functools.partial(norm_layer, **norm_kwargs) # bind/rebind args return norm_layer
pytorch-image-models/timm/layers/create_norm.py/0
{ "file_path": "pytorch-image-models/timm/layers/create_norm.py", "repo_id": "pytorch-image-models", "token_count": 630 }
196
""" Lambda Layer Paper: `LambdaNetworks: Modeling Long-Range Interactions Without Attention` - https://arxiv.org/abs/2102.08602 @misc{2102.08602, Author = {Irwan Bello}, Title = {LambdaNetworks: Modeling Long-Range Interactions Without Attention}, Year = {2021}, } Status: This impl is a WIP. Code snippets in the paper were used as reference but good chance some details are missing/wrong. I've only implemented local lambda conv based pos embeddings. For a PyTorch impl that includes other embedding options checkout https://github.com/lucidrains/lambda-networks Hacked together by / Copyright 2021 Ross Wightman """ import torch from torch import nn import torch.nn.functional as F from .grid import ndgrid from .helpers import to_2tuple, make_divisible from .weight_init import trunc_normal_ def rel_pos_indices(size): size = to_2tuple(size) pos = torch.stack(ndgrid(torch.arange(size[0]), torch.arange(size[1]))).flatten(1) rel_pos = pos[:, None, :] - pos[:, :, None] rel_pos[0] += size[0] - 1 rel_pos[1] += size[1] - 1 return rel_pos # 2, H * W, H * W class LambdaLayer(nn.Module): """Lambda Layer Paper: `LambdaNetworks: Modeling Long-Range Interactions Without Attention` - https://arxiv.org/abs/2102.08602 NOTE: intra-depth parameter 'u' is fixed at 1. It did not appear worth the complexity to add. The internal dimensions of the lambda module are controlled via the interaction of several arguments. * the output dimension of the module is specified by dim_out, which falls back to input dim if not set * the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim * the query (q) and key (k) dimension are determined by * dim_head = (dim_out * attn_ratio // num_heads) if dim_head is None * q = num_heads * dim_head, k = dim_head * as seen above, attn_ratio determines the ratio of q and k relative to the output if dim_head not set Args: dim (int): input dimension to the module dim_out (int): output dimension of the module, same as dim if not set feat_size (Tuple[int, int]): size of input feature_map for relative pos variant H, W stride (int): output stride of the module, avg pool used if stride == 2 num_heads (int): parallel attention heads. dim_head (int): dimension of query and key heads, calculated from dim_out * attn_ratio // num_heads if not set r (int): local lambda convolution radius. Use lambda conv if set, else relative pos if not. (default: 9) qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set. 
(default: 1.0) qkv_bias (bool): add bias to q, k, and v projections """ def __init__( self, dim, dim_out=None, feat_size=None, stride=1, num_heads=4, dim_head=16, r=9, qk_ratio=1.0, qkv_bias=False): super().__init__() dim_out = dim_out or dim assert dim_out % num_heads == 0, ' should be divided by num_heads' self.dim_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads self.num_heads = num_heads self.dim_v = dim_out // num_heads self.qkv = nn.Conv2d( dim, num_heads * self.dim_qk + self.dim_qk + self.dim_v, kernel_size=1, bias=qkv_bias) self.norm_q = nn.BatchNorm2d(num_heads * self.dim_qk) self.norm_v = nn.BatchNorm2d(self.dim_v) if r is not None: # local lambda convolution for pos self.conv_lambda = nn.Conv3d(1, self.dim_qk, (r, r, 1), padding=(r // 2, r // 2, 0)) self.pos_emb = None self.rel_pos_indices = None else: # relative pos embedding assert feat_size is not None feat_size = to_2tuple(feat_size) rel_size = [2 * s - 1 for s in feat_size] self.conv_lambda = None self.pos_emb = nn.Parameter(torch.zeros(rel_size[0], rel_size[1], self.dim_qk)) self.register_buffer('rel_pos_indices', rel_pos_indices(feat_size), persistent=False) self.pool = nn.AvgPool2d(2, 2) if stride == 2 else nn.Identity() self.reset_parameters() def reset_parameters(self): trunc_normal_(self.qkv.weight, std=self.qkv.weight.shape[1] ** -0.5) # fan-in if self.conv_lambda is not None: trunc_normal_(self.conv_lambda.weight, std=self.dim_qk ** -0.5) if self.pos_emb is not None: trunc_normal_(self.pos_emb, std=.02) def forward(self, x): B, C, H, W = x.shape M = H * W qkv = self.qkv(x) q, k, v = torch.split(qkv, [ self.num_heads * self.dim_qk, self.dim_qk, self.dim_v], dim=1) q = self.norm_q(q).reshape(B, self.num_heads, self.dim_qk, M).transpose(-1, -2) # B, num_heads, M, K v = self.norm_v(v).reshape(B, self.dim_v, M).transpose(-1, -2) # B, M, V k = F.softmax(k.reshape(B, self.dim_qk, M), dim=-1) # B, K, M content_lam = k @ v # B, K, V content_out = q @ content_lam.unsqueeze(1) # B, num_heads, M, V if self.pos_emb is None: position_lam = self.conv_lambda(v.reshape(B, 1, H, W, self.dim_v)) # B, H, W, V, K position_lam = position_lam.reshape(B, 1, self.dim_qk, H * W, self.dim_v).transpose(2, 3) # B, 1, M, K, V else: # FIXME relative pos embedding path not fully verified pos_emb = self.pos_emb[self.rel_pos_indices[0], self.rel_pos_indices[1]].expand(B, -1, -1, -1) position_lam = (pos_emb.transpose(-1, -2) @ v.unsqueeze(1)).unsqueeze(1) # B, 1, M, K, V position_out = (q.unsqueeze(-2) @ position_lam).squeeze(-2) # B, num_heads, M, V out = (content_out + position_out).transpose(-1, -2).reshape(B, C, H, W) # B, C (num_heads * V), H, W out = self.pool(out) return out
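

# Illustrative shape check (not part of timm; the function name is invented for this example).
# With dim=dim_out=64, num_heads=4, dim_head=16 and the local-conv position path (r=9),
# LambdaLayer maps (B, 64, H, W) -> (B, 64, H, W); the stride-2 pool then halves H and W.
def _lambda_layer_example():
    layer = LambdaLayer(dim=64, dim_out=64, num_heads=4, dim_head=16, r=9, stride=2)
    x = torch.randn(2, 64, 32, 32)
    y = layer(x)
    return y.shape  # expected: torch.Size([2, 64, 16, 16])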
pytorch-image-models/timm/layers/lambda_layer.py/0
{ "file_path": "pytorch-image-models/timm/layers/lambda_layer.py", "repo_id": "pytorch-image-models", "token_count": 2611 }
197
""" Selective Kernel Convolution/Attention Paper: Selective Kernel Networks (https://arxiv.org/abs/1903.06586) Hacked together by / Copyright 2020 Ross Wightman """ import torch from torch import nn as nn from .conv_bn_act import ConvNormActAa from .helpers import make_divisible from .trace_utils import _assert def _kernel_valid(k): if isinstance(k, (list, tuple)): for ki in k: return _kernel_valid(ki) assert k >= 3 and k % 2 class SelectiveKernelAttn(nn.Module): def __init__(self, channels, num_paths=2, attn_channels=32, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d): """ Selective Kernel Attention Module Selective Kernel attention mechanism factored out into its own module. """ super(SelectiveKernelAttn, self).__init__() self.num_paths = num_paths self.fc_reduce = nn.Conv2d(channels, attn_channels, kernel_size=1, bias=False) self.bn = norm_layer(attn_channels) self.act = act_layer(inplace=True) self.fc_select = nn.Conv2d(attn_channels, channels * num_paths, kernel_size=1, bias=False) def forward(self, x): _assert(x.shape[1] == self.num_paths, '') x = x.sum(1).mean((2, 3), keepdim=True) x = self.fc_reduce(x) x = self.bn(x) x = self.act(x) x = self.fc_select(x) B, C, H, W = x.shape x = x.view(B, self.num_paths, C // self.num_paths, H, W) x = torch.softmax(x, dim=1) return x class SelectiveKernel(nn.Module): def __init__(self, in_channels, out_channels=None, kernel_size=None, stride=1, dilation=1, groups=1, rd_ratio=1./16, rd_channels=None, rd_divisor=8, keep_3x3=True, split_input=True, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None, drop_layer=None): """ Selective Kernel Convolution Module As described in Selective Kernel Networks (https://arxiv.org/abs/1903.06586) with some modifications. Largest change is the input split, which divides the input channels across each convolution path, this can be viewed as a grouping of sorts, but the output channel counts expand to the module level value. This keeps the parameter count from ballooning when the convolutions themselves don't have groups, but still provides a noteworthy increase in performance over similar param count models without this attention layer. -Ross W Args: in_channels (int): module input (feature) channel count out_channels (int): module output (feature) channel count kernel_size (int, list): kernel size for each convolution branch stride (int): stride for convolutions dilation (int): dilation for module as a whole, impacts dilation of each branch groups (int): number of groups for each branch rd_ratio (int, float): reduction factor for attention features keep_3x3 (bool): keep all branch convolution kernels as 3x3, changing larger kernels for dilations split_input (bool): split input channels evenly across each convolution branch, keeps param count lower, can be viewed as grouping by path, output expands to module out_channels count act_layer (nn.Module): activation layer to use norm_layer (nn.Module): batchnorm/norm layer to use aa_layer (nn.Module): anti-aliasing module drop_layer (nn.Module): spatial drop module in convs (drop block, etc) """ super(SelectiveKernel, self).__init__() out_channels = out_channels or in_channels kernel_size = kernel_size or [3, 5] # default to one 3x3 and one 5x5 branch. 
5x5 -> 3x3 + dilation _kernel_valid(kernel_size) if not isinstance(kernel_size, list): kernel_size = [kernel_size] * 2 if keep_3x3: dilation = [dilation * (k - 1) // 2 for k in kernel_size] kernel_size = [3] * len(kernel_size) else: dilation = [dilation] * len(kernel_size) self.num_paths = len(kernel_size) self.in_channels = in_channels self.out_channels = out_channels self.split_input = split_input if self.split_input: assert in_channels % self.num_paths == 0 in_channels = in_channels // self.num_paths groups = min(out_channels, groups) conv_kwargs = dict( stride=stride, groups=groups, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, drop_layer=drop_layer) self.paths = nn.ModuleList([ ConvNormActAa(in_channels, out_channels, kernel_size=k, dilation=d, **conv_kwargs) for k, d in zip(kernel_size, dilation)]) attn_channels = rd_channels or make_divisible(out_channels * rd_ratio, divisor=rd_divisor) self.attn = SelectiveKernelAttn(out_channels, self.num_paths, attn_channels) def forward(self, x): if self.split_input: x_split = torch.split(x, self.in_channels // self.num_paths, 1) x_paths = [op(x_split[i]) for i, op in enumerate(self.paths)] else: x_paths = [op(x) for op in self.paths] x = torch.stack(x_paths, dim=1) x_attn = self.attn(x) x = x * x_attn x = torch.sum(x, dim=1) return x
pytorch-image-models/timm/layers/selective_kernel.py/0
{ "file_path": "pytorch-image-models/timm/layers/selective_kernel.py", "repo_id": "pytorch-image-models", "token_count": 2318 }
198
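A short, hedged sketch of using the SelectiveKernel module above (import path assumed from the file path in this record). With the default kernel_size=[3, 5] and keep_3x3=True, the two branches become a 3x3 and a dilated 3x3, the input channels are split across them, and the outputs are fused by the SelectiveKernelAttn softmax weights.

import torch
from timm.layers.selective_kernel import SelectiveKernel  # assumed import path

sk = SelectiveKernel(in_channels=64, out_channels=128, stride=2, split_input=True)
x = torch.randn(2, 64, 56, 56)
y = sk(x)  # each branch sees 32 channels; branch outputs are attention-weighted and summed
print(y.shape)  # expected: torch.Size([2, 128, 28, 28])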
from .beit import *
from .byoanet import *
from .byobnet import *
from .cait import *
from .coat import *
from .convit import *
from .convmixer import *
from .convnext import *
from .crossvit import *
from .cspnet import *
from .davit import *
from .deit import *
from .densenet import *
from .dla import *
from .dpn import *
from .edgenext import *
from .efficientformer import *
from .efficientformer_v2 import *
from .efficientnet import *
from .efficientvit_mit import *
from .efficientvit_msra import *
from .eva import *
from .fastvit import *
from .focalnet import *
from .gcvit import *
from .ghostnet import *
from .hardcorenas import *
from .hgnet import *
from .hrnet import *
from .inception_next import *
from .inception_resnet_v2 import *
from .inception_v3 import *
from .inception_v4 import *
from .levit import *
from .maxxvit import *
from .metaformer import *
from .mlp_mixer import *
from .mobilenetv3 import *
from .mobilevit import *
from .mvitv2 import *
from .nasnet import *
from .nest import *
from .nextvit import *
from .nfnet import *
from .pit import *
from .pnasnet import *
from .pvt_v2 import *
from .regnet import *
from .repghost import *
from .repvit import *
from .res2net import *
from .resnest import *
from .resnet import *
from .resnetv2 import *
from .rexnet import *
from .selecsls import *
from .senet import *
from .sequencer import *
from .sknet import *
from .swin_transformer import *
from .swin_transformer_v2 import *
from .swin_transformer_v2_cr import *
from .tiny_vit import *
from .tnt import *
from .tresnet import *
from .twins import *
from .vgg import *
from .visformer import *
from .vision_transformer import *
from .vision_transformer_hybrid import *
from .vision_transformer_relpos import *
from .vision_transformer_sam import *
from .volo import *
from .vovnet import *
from .xception import *
from .xception_aligned import *
from .xcit import *

from ._builder import build_model_with_cfg, load_pretrained, load_custom_pretrained, resolve_pretrained_cfg, \
    set_pretrained_download_progress, set_pretrained_check_hash
from ._factory import create_model, parse_model_name, safe_model_name
from ._features import FeatureInfo, FeatureHooks, FeatureHookNet, FeatureListNet, FeatureDictNet
from ._features_fx import FeatureGraphNet, GraphExtractNet, create_feature_extractor, \
    register_notrace_module, is_notrace_module, get_notrace_modules, \
    register_notrace_function, is_notrace_function, get_notrace_functions
from ._helpers import clean_state_dict, load_state_dict, load_checkpoint, remap_state_dict, resume_checkpoint
from ._hub import load_model_config_from_hf, load_state_dict_from_hf, push_to_hf_hub
from ._manipulate import model_parameters, named_apply, named_modules, named_modules_with_params, \
    group_modules, group_parameters, checkpoint_seq, adapt_input_conv
from ._pretrained import PretrainedCfg, DefaultCfg, filter_pretrained_cfg
from ._prune import adapt_model_from_string
from ._registry import split_model_name_tag, get_arch_name, generate_default_cfgs, register_model, \
    register_model_deprecations, model_entrypoint, list_models, list_pretrained, get_deprecated_models, \
    is_model, list_modules, is_model_in_modules, is_model_pretrained, get_pretrained_cfg, get_pretrained_cfg_value
pytorch-image-models/timm/models/__init__.py/0
{ "file_path": "pytorch-image-models/timm/models/__init__.py", "repo_id": "pytorch-image-models", "token_count": 1077 }
199
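The __init__ above is what turns the individual model files into the public timm.models API: each file registers entrypoints with @register_model, and the _factory/_registry helpers resolve them by name. A small sketch of that surface (the model names are examples only):

import timm

print(timm.list_models('dpn*'))  # registry lookup over the registered entrypoints
model = timm.create_model('resnet50', pretrained=False, num_classes=10)
print(model.default_cfg['input_size'])  # pretrained cfg resolved via _builder/_pretrained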
""" PyTorch implementation of DualPathNetworks Based on original MXNet implementation https://github.com/cypw/DPNs with many ideas from another PyTorch implementation https://github.com/oyam/pytorch-DPNs. This implementation is compatible with the pretrained weights from cypw's MXNet implementation. Hacked together by / Copyright 2020 Ross Wightman """ from collections import OrderedDict from functools import partial from typing import Tuple import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DPN_MEAN, IMAGENET_DPN_STD, IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import BatchNormAct2d, ConvNormAct, create_conv2d, create_classifier, get_norm_act_layer from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs __all__ = ['DPN'] class CatBnAct(nn.Module): def __init__(self, in_chs, norm_layer=BatchNormAct2d): super(CatBnAct, self).__init__() self.bn = norm_layer(in_chs, eps=0.001) @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (Tuple[torch.Tensor, torch.Tensor]) -> (torch.Tensor) pass @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (torch.Tensor) -> (torch.Tensor) pass def forward(self, x): if isinstance(x, tuple): x = torch.cat(x, dim=1) return self.bn(x) class BnActConv2d(nn.Module): def __init__(self, in_chs, out_chs, kernel_size, stride, groups=1, norm_layer=BatchNormAct2d): super(BnActConv2d, self).__init__() self.bn = norm_layer(in_chs, eps=0.001) self.conv = create_conv2d(in_chs, out_chs, kernel_size, stride=stride, groups=groups) def forward(self, x): return self.conv(self.bn(x)) class DualPathBlock(nn.Module): def __init__( self, in_chs, num_1x1_a, num_3x3_b, num_1x1_c, inc, groups, block_type='normal', b=False, ): super(DualPathBlock, self).__init__() self.num_1x1_c = num_1x1_c self.inc = inc self.b = b if block_type == 'proj': self.key_stride = 1 self.has_proj = True elif block_type == 'down': self.key_stride = 2 self.has_proj = True else: assert block_type == 'normal' self.key_stride = 1 self.has_proj = False self.c1x1_w_s1 = None self.c1x1_w_s2 = None if self.has_proj: # Using different member names here to allow easier parameter key matching for conversion if self.key_stride == 2: self.c1x1_w_s2 = BnActConv2d( in_chs=in_chs, out_chs=num_1x1_c + 2 * inc, kernel_size=1, stride=2) else: self.c1x1_w_s1 = BnActConv2d( in_chs=in_chs, out_chs=num_1x1_c + 2 * inc, kernel_size=1, stride=1) self.c1x1_a = BnActConv2d(in_chs=in_chs, out_chs=num_1x1_a, kernel_size=1, stride=1) self.c3x3_b = BnActConv2d( in_chs=num_1x1_a, out_chs=num_3x3_b, kernel_size=3, stride=self.key_stride, groups=groups) if b: self.c1x1_c = CatBnAct(in_chs=num_3x3_b) self.c1x1_c1 = create_conv2d(num_3x3_b, num_1x1_c, kernel_size=1) self.c1x1_c2 = create_conv2d(num_3x3_b, inc, kernel_size=1) else: self.c1x1_c = BnActConv2d(in_chs=num_3x3_b, out_chs=num_1x1_c + inc, kernel_size=1, stride=1) self.c1x1_c1 = None self.c1x1_c2 = None @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor] pass @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor] pass def forward(self, x) -> Tuple[torch.Tensor, torch.Tensor]: if isinstance(x, tuple): x_in = torch.cat(x, dim=1) else: x_in = x if self.c1x1_w_s1 is None and self.c1x1_w_s2 is None: # self.has_proj == False, torchscript requires condition on module == None x_s1 = x[0] x_s2 = 
x[1] else: # self.has_proj == True if self.c1x1_w_s1 is not None: # self.key_stride = 1 x_s = self.c1x1_w_s1(x_in) else: # self.key_stride = 2 x_s = self.c1x1_w_s2(x_in) x_s1 = x_s[:, :self.num_1x1_c, :, :] x_s2 = x_s[:, self.num_1x1_c:, :, :] x_in = self.c1x1_a(x_in) x_in = self.c3x3_b(x_in) x_in = self.c1x1_c(x_in) if self.c1x1_c1 is not None: # self.b == True, using None check for torchscript compat out1 = self.c1x1_c1(x_in) out2 = self.c1x1_c2(x_in) else: out1 = x_in[:, :self.num_1x1_c, :, :] out2 = x_in[:, self.num_1x1_c:, :, :] resid = x_s1 + out1 dense = torch.cat([x_s2, out2], dim=1) return resid, dense class DPN(nn.Module): def __init__( self, k_sec=(3, 4, 20, 3), inc_sec=(16, 32, 24, 128), k_r=96, groups=32, num_classes=1000, in_chans=3, output_stride=32, global_pool='avg', small=False, num_init_features=64, b=False, drop_rate=0., norm_layer='batchnorm2d', act_layer='relu', fc_act_layer='elu', ): super(DPN, self).__init__() self.num_classes = num_classes self.drop_rate = drop_rate self.b = b assert output_stride == 32 # FIXME look into dilation support norm_layer = partial(get_norm_act_layer(norm_layer, act_layer=act_layer), eps=.001) fc_norm_layer = partial(get_norm_act_layer(norm_layer, act_layer=fc_act_layer), eps=.001, inplace=False) bw_factor = 1 if small else 4 blocks = OrderedDict() # conv1 blocks['conv1_1'] = ConvNormAct( in_chans, num_init_features, kernel_size=3 if small else 7, stride=2, norm_layer=norm_layer) blocks['conv1_pool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.feature_info = [dict(num_chs=num_init_features, reduction=2, module='features.conv1_1')] # conv2 bw = 64 * bw_factor inc = inc_sec[0] r = (k_r * bw) // (64 * bw_factor) blocks['conv2_1'] = DualPathBlock(num_init_features, r, r, bw, inc, groups, 'proj', b) in_chs = bw + 3 * inc for i in range(2, k_sec[0] + 1): blocks['conv2_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) in_chs += inc self.feature_info += [dict(num_chs=in_chs, reduction=4, module=f'features.conv2_{k_sec[0]}')] # conv3 bw = 128 * bw_factor inc = inc_sec[1] r = (k_r * bw) // (64 * bw_factor) blocks['conv3_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b) in_chs = bw + 3 * inc for i in range(2, k_sec[1] + 1): blocks['conv3_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) in_chs += inc self.feature_info += [dict(num_chs=in_chs, reduction=8, module=f'features.conv3_{k_sec[1]}')] # conv4 bw = 256 * bw_factor inc = inc_sec[2] r = (k_r * bw) // (64 * bw_factor) blocks['conv4_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b) in_chs = bw + 3 * inc for i in range(2, k_sec[2] + 1): blocks['conv4_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) in_chs += inc self.feature_info += [dict(num_chs=in_chs, reduction=16, module=f'features.conv4_{k_sec[2]}')] # conv5 bw = 512 * bw_factor inc = inc_sec[3] r = (k_r * bw) // (64 * bw_factor) blocks['conv5_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b) in_chs = bw + 3 * inc for i in range(2, k_sec[3] + 1): blocks['conv5_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) in_chs += inc self.feature_info += [dict(num_chs=in_chs, reduction=32, module=f'features.conv5_{k_sec[3]}')] blocks['conv5_bn_ac'] = CatBnAct(in_chs, norm_layer=fc_norm_layer) self.num_features = in_chs self.features = nn.Sequential(blocks) # Using 1x1 conv for the FC layer to allow the extra pooling scheme self.global_pool, self.classifier = create_classifier( self.num_features, self.num_classes, 
pool_type=global_pool, use_conv=True) self.flatten = nn.Flatten(1) if global_pool else nn.Identity() @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^features\.conv1', blocks=[ (r'^features\.conv(\d+)' if coarse else r'^features\.conv(\d+)_(\d+)', None), (r'^features\.conv5_bn_ac', (99999,)) ] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self): return self.classifier def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes self.global_pool, self.classifier = create_classifier( self.num_features, self.num_classes, pool_type=global_pool, use_conv=True) self.flatten = nn.Flatten(1) if global_pool else nn.Identity() def forward_features(self, x): return self.features(x) def forward_head(self, x, pre_logits: bool = False): x = self.global_pool(x) if self.drop_rate > 0.: x = F.dropout(x, p=self.drop_rate, training=self.training) if pre_logits: return self.flatten(x) x = self.classifier(x) return self.flatten(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_dpn(variant, pretrained=False, **kwargs): return build_model_with_cfg( DPN, variant, pretrained, feature_cfg=dict(feature_concat=True, flatten_sequential=True), **kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DPN_MEAN, 'std': IMAGENET_DPN_STD, 'first_conv': 'features.conv1_1.conv', 'classifier': 'classifier', **kwargs } default_cfgs = generate_default_cfgs({ 'dpn48b.untrained': _cfg(mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'dpn68.mx_in1k': _cfg(hf_hub_id='timm/'), 'dpn68b.ra_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'dpn68b.mx_in1k': _cfg(hf_hub_id='timm/'), 'dpn92.mx_in1k': _cfg(hf_hub_id='timm/'), 'dpn98.mx_in1k': _cfg(hf_hub_id='timm/'), 'dpn131.mx_in1k': _cfg(hf_hub_id='timm/'), 'dpn107.mx_in1k': _cfg(hf_hub_id='timm/') }) @register_model def dpn48b(pretrained=False, **kwargs) -> DPN: model_args = dict( small=True, num_init_features=10, k_r=128, groups=32, b=True, k_sec=(3, 4, 6, 3), inc_sec=(16, 32, 32, 64), act_layer='silu') return _create_dpn('dpn48b', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def dpn68(pretrained=False, **kwargs) -> DPN: model_args = dict( small=True, num_init_features=10, k_r=128, groups=32, k_sec=(3, 4, 12, 3), inc_sec=(16, 32, 32, 64)) return _create_dpn('dpn68', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def dpn68b(pretrained=False, **kwargs) -> DPN: model_args = dict( small=True, num_init_features=10, k_r=128, groups=32, b=True, k_sec=(3, 4, 12, 3), inc_sec=(16, 32, 32, 64)) return _create_dpn('dpn68b', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def dpn92(pretrained=False, **kwargs) -> DPN: model_args = dict( num_init_features=64, k_r=96, groups=32, k_sec=(3, 4, 20, 3), inc_sec=(16, 32, 24, 128)) return _create_dpn('dpn92', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def dpn98(pretrained=False, **kwargs) -> DPN: model_args = dict( num_init_features=96, k_r=160, groups=40, k_sec=(3, 6, 20, 3), inc_sec=(16, 32, 32, 128)) return _create_dpn('dpn98', pretrained=pretrained, **dict(model_args, **kwargs)) 
@register_model def dpn131(pretrained=False, **kwargs) -> DPN: model_args = dict( num_init_features=128, k_r=160, groups=40, k_sec=(4, 8, 28, 3), inc_sec=(16, 32, 32, 128)) return _create_dpn('dpn131', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def dpn107(pretrained=False, **kwargs) -> DPN: model_args = dict( num_init_features=128, k_r=200, groups=50, k_sec=(4, 8, 20, 3), inc_sec=(20, 64, 64, 128)) return _create_dpn('dpn107', pretrained=pretrained, **dict(model_args, **kwargs))
pytorch-image-models/timm/models/dpn.py/0
{ "file_path": "pytorch-image-models/timm/models/dpn.py", "repo_id": "pytorch-image-models", "token_count": 6985 }
200
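A hedged usage example for the DPN registrations above, using model names taken from the default_cfgs shown; features_only relies on the feature_info entries assembled in DPN.__init__.

import torch
import timm

model = timm.create_model('dpn68b', pretrained=False)
x = torch.randn(1, 3, 224, 224)
print(model(x).shape)  # torch.Size([1, 1000])

feats = timm.create_model('dpn68b', pretrained=False, features_only=True)(x)
print([f.shape[1] for f in feats])  # per-stage channel counts from feature_info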
from ._builder import *
from ._helpers import *
from ._manipulate import *
from ._prune import *

import warnings
warnings.warn(f"Importing from {__name__} is deprecated, please import via timm.models", DeprecationWarning)
pytorch-image-models/timm/models/helpers.py/0
{ "file_path": "pytorch-image-models/timm/models/helpers.py", "repo_id": "pytorch-image-models", "token_count": 64 }
201
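The shim above only re-exports the private modules and warns; below is a sketch of that behavior (the warning fires on the first import of the deprecated path) and of the preferred import path going forward.

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    import timm.models.helpers  # deprecated path; warns on first import only
print([str(w.message) for w in caught])

from timm.models import build_model_with_cfg, load_checkpoint  # preferred, per the __init__ above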
""" NasNet-A (Large) nasnetalarge implementation grabbed from Cadene's pretrained models https://github.com/Cadene/pretrained-models.pytorch """ from functools import partial import torch import torch.nn as nn import torch.nn.functional as F from timm.layers import ConvNormAct, create_conv2d, create_pool2d, create_classifier from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs __all__ = ['NASNetALarge'] class ActConvBn(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=''): super(ActConvBn, self).__init__() self.act = nn.ReLU() self.conv = create_conv2d( in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding) self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1) def forward(self, x): x = self.act(x) x = self.conv(x) x = self.bn(x) return x class SeparableConv2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride, padding=''): super(SeparableConv2d, self).__init__() self.depthwise_conv2d = create_conv2d( in_channels, in_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=in_channels) self.pointwise_conv2d = create_conv2d( in_channels, out_channels, kernel_size=1, padding=0) def forward(self, x): x = self.depthwise_conv2d(x) x = self.pointwise_conv2d(x) return x class BranchSeparables(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, pad_type='', stem_cell=False): super(BranchSeparables, self).__init__() middle_channels = out_channels if stem_cell else in_channels self.act_1 = nn.ReLU() self.separable_1 = SeparableConv2d( in_channels, middle_channels, kernel_size, stride=stride, padding=pad_type) self.bn_sep_1 = nn.BatchNorm2d(middle_channels, eps=0.001, momentum=0.1) self.act_2 = nn.ReLU(inplace=True) self.separable_2 = SeparableConv2d( middle_channels, out_channels, kernel_size, stride=1, padding=pad_type) self.bn_sep_2 = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1) def forward(self, x): x = self.act_1(x) x = self.separable_1(x) x = self.bn_sep_1(x) x = self.act_2(x) x = self.separable_2(x) x = self.bn_sep_2(x) return x class CellStem0(nn.Module): def __init__(self, stem_size, num_channels=42, pad_type=''): super(CellStem0, self).__init__() self.num_channels = num_channels self.stem_size = stem_size self.conv_1x1 = ActConvBn(self.stem_size, self.num_channels, 1, stride=1) self.comb_iter_0_left = BranchSeparables(self.num_channels, self.num_channels, 5, 2, pad_type) self.comb_iter_0_right = BranchSeparables(self.stem_size, self.num_channels, 7, 2, pad_type, stem_cell=True) self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) self.comb_iter_1_right = BranchSeparables(self.stem_size, self.num_channels, 7, 2, pad_type, stem_cell=True) self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type) self.comb_iter_2_right = BranchSeparables(self.stem_size, self.num_channels, 5, 2, pad_type, stem_cell=True) self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_4_left = BranchSeparables(self.num_channels, self.num_channels, 3, 1, pad_type) self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) def forward(self, x): x1 = self.conv_1x1(x) x_comb_iter_0_left = self.comb_iter_0_left(x1) x_comb_iter_0_right = self.comb_iter_0_right(x) x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right x_comb_iter_1_left = self.comb_iter_1_left(x1) x_comb_iter_1_right = 
self.comb_iter_1_right(x) x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right x_comb_iter_2_left = self.comb_iter_2_left(x1) x_comb_iter_2_right = self.comb_iter_2_right(x) x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) x_comb_iter_4_right = self.comb_iter_4_right(x1) x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) return x_out class CellStem1(nn.Module): def __init__(self, stem_size, num_channels, pad_type=''): super(CellStem1, self).__init__() self.num_channels = num_channels self.stem_size = stem_size self.conv_1x1 = ActConvBn(2 * self.num_channels, self.num_channels, 1, stride=1) self.act = nn.ReLU() self.path_1 = nn.Sequential() self.path_1.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) self.path_1.add_module('conv', nn.Conv2d(self.stem_size, self.num_channels // 2, 1, stride=1, bias=False)) self.path_2 = nn.Sequential() self.path_2.add_module('pad', nn.ZeroPad2d((-1, 1, -1, 1))) self.path_2.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) self.path_2.add_module('conv', nn.Conv2d(self.stem_size, self.num_channels // 2, 1, stride=1, bias=False)) self.final_path_bn = nn.BatchNorm2d(self.num_channels, eps=0.001, momentum=0.1) self.comb_iter_0_left = BranchSeparables(self.num_channels, self.num_channels, 5, 2, pad_type) self.comb_iter_0_right = BranchSeparables(self.num_channels, self.num_channels, 7, 2, pad_type) self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) self.comb_iter_1_right = BranchSeparables(self.num_channels, self.num_channels, 7, 2, pad_type) self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type) self.comb_iter_2_right = BranchSeparables(self.num_channels, self.num_channels, 5, 2, pad_type) self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_4_left = BranchSeparables(self.num_channels, self.num_channels, 3, 1, pad_type) self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) def forward(self, x_conv0, x_stem_0): x_left = self.conv_1x1(x_stem_0) x_relu = self.act(x_conv0) # path 1 x_path1 = self.path_1(x_relu) # path 2 x_path2 = self.path_2(x_relu) # final path x_right = self.final_path_bn(torch.cat([x_path1, x_path2], 1)) x_comb_iter_0_left = self.comb_iter_0_left(x_left) x_comb_iter_0_right = self.comb_iter_0_right(x_right) x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right x_comb_iter_1_left = self.comb_iter_1_left(x_left) x_comb_iter_1_right = self.comb_iter_1_right(x_right) x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right x_comb_iter_2_left = self.comb_iter_2_left(x_left) x_comb_iter_2_right = self.comb_iter_2_right(x_right) x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) x_comb_iter_4_right = self.comb_iter_4_right(x_left) x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) return x_out class FirstCell(nn.Module): def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): super(FirstCell, self).__init__() 
self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1) self.act = nn.ReLU() self.path_1 = nn.Sequential() self.path_1.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) self.path_1.add_module('conv', nn.Conv2d(in_chs_left, out_chs_left, 1, stride=1, bias=False)) self.path_2 = nn.Sequential() self.path_2.add_module('pad', nn.ZeroPad2d((-1, 1, -1, 1))) self.path_2.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) self.path_2.add_module('conv', nn.Conv2d(in_chs_left, out_chs_left, 1, stride=1, bias=False)) self.final_path_bn = nn.BatchNorm2d(out_chs_left * 2, eps=0.001, momentum=0.1) self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 1, pad_type) self.comb_iter_0_right = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) self.comb_iter_1_left = BranchSeparables(out_chs_right, out_chs_right, 5, 1, pad_type) self.comb_iter_1_right = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) self.comb_iter_2_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_3_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) def forward(self, x, x_prev): x_relu = self.act(x_prev) x_path1 = self.path_1(x_relu) x_path2 = self.path_2(x_relu) x_left = self.final_path_bn(torch.cat([x_path1, x_path2], 1)) x_right = self.conv_1x1(x) x_comb_iter_0_left = self.comb_iter_0_left(x_right) x_comb_iter_0_right = self.comb_iter_0_right(x_left) x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right x_comb_iter_1_left = self.comb_iter_1_left(x_left) x_comb_iter_1_right = self.comb_iter_1_right(x_left) x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right x_comb_iter_2_left = self.comb_iter_2_left(x_right) x_comb_iter_2 = x_comb_iter_2_left + x_left x_comb_iter_3_left = self.comb_iter_3_left(x_left) x_comb_iter_3_right = self.comb_iter_3_right(x_left) x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right x_comb_iter_4_left = self.comb_iter_4_left(x_right) x_comb_iter_4 = x_comb_iter_4_left + x_right x_out = torch.cat([x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) return x_out class NormalCell(nn.Module): def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): super(NormalCell, self).__init__() self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, 1, stride=1, padding=pad_type) self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1, padding=pad_type) self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 1, pad_type) self.comb_iter_0_right = BranchSeparables(out_chs_left, out_chs_left, 3, 1, pad_type) self.comb_iter_1_left = BranchSeparables(out_chs_left, out_chs_left, 5, 1, pad_type) self.comb_iter_1_right = BranchSeparables(out_chs_left, out_chs_left, 3, 1, pad_type) self.comb_iter_2_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_3_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) def forward(self, x, x_prev): x_left = self.conv_prev_1x1(x_prev) x_right = self.conv_1x1(x) x_comb_iter_0_left = 
self.comb_iter_0_left(x_right) x_comb_iter_0_right = self.comb_iter_0_right(x_left) x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right x_comb_iter_1_left = self.comb_iter_1_left(x_left) x_comb_iter_1_right = self.comb_iter_1_right(x_left) x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right x_comb_iter_2_left = self.comb_iter_2_left(x_right) x_comb_iter_2 = x_comb_iter_2_left + x_left x_comb_iter_3_left = self.comb_iter_3_left(x_left) x_comb_iter_3_right = self.comb_iter_3_right(x_left) x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right x_comb_iter_4_left = self.comb_iter_4_left(x_right) x_comb_iter_4 = x_comb_iter_4_left + x_right x_out = torch.cat([x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) return x_out class ReductionCell0(nn.Module): def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): super(ReductionCell0, self).__init__() self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, 1, stride=1, padding=pad_type) self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1, padding=pad_type) self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) self.comb_iter_0_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) self.comb_iter_1_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type) self.comb_iter_2_right = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) def forward(self, x, x_prev): x_left = self.conv_prev_1x1(x_prev) x_right = self.conv_1x1(x) x_comb_iter_0_left = self.comb_iter_0_left(x_right) x_comb_iter_0_right = self.comb_iter_0_right(x_left) x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right x_comb_iter_1_left = self.comb_iter_1_left(x_right) x_comb_iter_1_right = self.comb_iter_1_right(x_left) x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right x_comb_iter_2_left = self.comb_iter_2_left(x_right) x_comb_iter_2_right = self.comb_iter_2_right(x_left) x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) x_comb_iter_4_right = self.comb_iter_4_right(x_right) x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) return x_out class ReductionCell1(nn.Module): def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): super(ReductionCell1, self).__init__() self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, 1, stride=1, padding=pad_type) self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1, padding=pad_type) self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) self.comb_iter_0_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) self.comb_iter_1_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) self.comb_iter_2_left = create_pool2d('avg', 3, 2, 
count_include_pad=False, padding=pad_type) self.comb_iter_2_right = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) def forward(self, x, x_prev): x_left = self.conv_prev_1x1(x_prev) x_right = self.conv_1x1(x) x_comb_iter_0_left = self.comb_iter_0_left(x_right) x_comb_iter_0_right = self.comb_iter_0_right(x_left) x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right x_comb_iter_1_left = self.comb_iter_1_left(x_right) x_comb_iter_1_right = self.comb_iter_1_right(x_left) x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right x_comb_iter_2_left = self.comb_iter_2_left(x_right) x_comb_iter_2_right = self.comb_iter_2_right(x_left) x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) x_comb_iter_4_right = self.comb_iter_4_right(x_right) x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) return x_out class NASNetALarge(nn.Module): """NASNetALarge (6 @ 4032) """ def __init__( self, num_classes=1000, in_chans=3, stem_size=96, channel_multiplier=2, num_features=4032, output_stride=32, drop_rate=0., global_pool='avg', pad_type='same', ): super(NASNetALarge, self).__init__() self.num_classes = num_classes self.stem_size = stem_size self.num_features = num_features self.channel_multiplier = channel_multiplier assert output_stride == 32 channels = self.num_features // 24 # 24 is default value for the architecture self.conv0 = ConvNormAct( in_channels=in_chans, out_channels=self.stem_size, kernel_size=3, padding=0, stride=2, norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.1), apply_act=False) self.cell_stem_0 = CellStem0( self.stem_size, num_channels=channels // (channel_multiplier ** 2), pad_type=pad_type) self.cell_stem_1 = CellStem1( self.stem_size, num_channels=channels // channel_multiplier, pad_type=pad_type) self.cell_0 = FirstCell( in_chs_left=channels, out_chs_left=channels // 2, in_chs_right=2 * channels, out_chs_right=channels, pad_type=pad_type) self.cell_1 = NormalCell( in_chs_left=2 * channels, out_chs_left=channels, in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) self.cell_2 = NormalCell( in_chs_left=6 * channels, out_chs_left=channels, in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) self.cell_3 = NormalCell( in_chs_left=6 * channels, out_chs_left=channels, in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) self.cell_4 = NormalCell( in_chs_left=6 * channels, out_chs_left=channels, in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) self.cell_5 = NormalCell( in_chs_left=6 * channels, out_chs_left=channels, in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) self.reduction_cell_0 = ReductionCell0( in_chs_left=6 * channels, out_chs_left=2 * channels, in_chs_right=6 * channels, out_chs_right=2 * channels, pad_type=pad_type) self.cell_6 = FirstCell( in_chs_left=6 * channels, out_chs_left=channels, in_chs_right=8 * channels, out_chs_right=2 * channels, pad_type=pad_type) self.cell_7 = NormalCell( in_chs_left=8 * channels, out_chs_left=2 * channels, in_chs_right=12 * 
channels, out_chs_right=2 * channels, pad_type=pad_type) self.cell_8 = NormalCell( in_chs_left=12 * channels, out_chs_left=2 * channels, in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) self.cell_9 = NormalCell( in_chs_left=12 * channels, out_chs_left=2 * channels, in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) self.cell_10 = NormalCell( in_chs_left=12 * channels, out_chs_left=2 * channels, in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) self.cell_11 = NormalCell( in_chs_left=12 * channels, out_chs_left=2 * channels, in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) self.reduction_cell_1 = ReductionCell1( in_chs_left=12 * channels, out_chs_left=4 * channels, in_chs_right=12 * channels, out_chs_right=4 * channels, pad_type=pad_type) self.cell_12 = FirstCell( in_chs_left=12 * channels, out_chs_left=2 * channels, in_chs_right=16 * channels, out_chs_right=4 * channels, pad_type=pad_type) self.cell_13 = NormalCell( in_chs_left=16 * channels, out_chs_left=4 * channels, in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) self.cell_14 = NormalCell( in_chs_left=24 * channels, out_chs_left=4 * channels, in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) self.cell_15 = NormalCell( in_chs_left=24 * channels, out_chs_left=4 * channels, in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) self.cell_16 = NormalCell( in_chs_left=24 * channels, out_chs_left=4 * channels, in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) self.cell_17 = NormalCell( in_chs_left=24 * channels, out_chs_left=4 * channels, in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) self.act = nn.ReLU(inplace=True) self.feature_info = [ dict(num_chs=96, reduction=2, module='conv0'), dict(num_chs=168, reduction=4, module='cell_stem_1.conv_1x1.act'), dict(num_chs=1008, reduction=8, module='reduction_cell_0.conv_1x1.act'), dict(num_chs=2016, reduction=16, module='reduction_cell_1.conv_1x1.act'), dict(num_chs=4032, reduction=32, module='act'), ] self.global_pool, self.head_drop, self.last_linear = create_classifier( self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^conv0|cell_stem_[01]', blocks=[ (r'^cell_(\d+)', None), (r'^reduction_cell_0', (6,)), (r'^reduction_cell_1', (12,)), ] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self): return self.last_linear def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes self.global_pool, self.last_linear = create_classifier( self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): x_conv0 = self.conv0(x) x_stem_0 = self.cell_stem_0(x_conv0) x_stem_1 = self.cell_stem_1(x_conv0, x_stem_0) x_cell_0 = self.cell_0(x_stem_1, x_stem_0) x_cell_1 = self.cell_1(x_cell_0, x_stem_1) x_cell_2 = self.cell_2(x_cell_1, x_cell_0) x_cell_3 = self.cell_3(x_cell_2, x_cell_1) x_cell_4 = self.cell_4(x_cell_3, x_cell_2) x_cell_5 = self.cell_5(x_cell_4, x_cell_3) x_reduction_cell_0 = self.reduction_cell_0(x_cell_5, x_cell_4) x_cell_6 = self.cell_6(x_reduction_cell_0, x_cell_4) x_cell_7 = self.cell_7(x_cell_6, x_reduction_cell_0) x_cell_8 = self.cell_8(x_cell_7, x_cell_6) x_cell_9 = 
self.cell_9(x_cell_8, x_cell_7) x_cell_10 = self.cell_10(x_cell_9, x_cell_8) x_cell_11 = self.cell_11(x_cell_10, x_cell_9) x_reduction_cell_1 = self.reduction_cell_1(x_cell_11, x_cell_10) x_cell_12 = self.cell_12(x_reduction_cell_1, x_cell_10) x_cell_13 = self.cell_13(x_cell_12, x_reduction_cell_1) x_cell_14 = self.cell_14(x_cell_13, x_cell_12) x_cell_15 = self.cell_15(x_cell_14, x_cell_13) x_cell_16 = self.cell_16(x_cell_15, x_cell_14) x_cell_17 = self.cell_17(x_cell_16, x_cell_15) x = self.act(x_cell_17) return x def forward_head(self, x): x = self.global_pool(x) x = self.head_drop(x) x = self.last_linear(x) return x def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_nasnet(variant, pretrained=False, **kwargs): return build_model_with_cfg( NASNetALarge, variant, pretrained, feature_cfg=dict(feature_cls='hook', no_rewrite=True), # not possible to re-write this model **kwargs, ) default_cfgs = generate_default_cfgs({ 'nasnetalarge.tf_in1k': { 'hf_hub_id': 'timm/', 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nasnetalarge-dc4a7b8b.pth', 'input_size': (3, 331, 331), 'pool_size': (11, 11), 'crop_pct': 0.911, 'interpolation': 'bicubic', 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), 'num_classes': 1000, 'first_conv': 'conv0.conv', 'classifier': 'last_linear', }, }) @register_model def nasnetalarge(pretrained=False, **kwargs) -> NASNetALarge: """NASNet-A large model architecture. """ model_kwargs = dict(pad_type='same', **kwargs) return _create_nasnet('nasnetalarge', pretrained, **model_kwargs)
pytorch-image-models/timm/models/nasnet.py/0
{ "file_path": "pytorch-image-models/timm/models/nasnet.py", "repo_id": "pytorch-image-models", "token_count": 13247 }
202
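A hedged example for the nasnetalarge entrypoint above; the default cfg shown expects 331x331 inputs with 0.5 mean/std, and feature extraction is hook-based (feature_cls='hook'), returning the stages listed in self.feature_info.

import torch
import timm

model = timm.create_model('nasnetalarge', pretrained=False)
x = torch.randn(1, 3, 331, 331)
print(model(x).shape)  # torch.Size([1, 1000])

feats = timm.create_model('nasnetalarge', pretrained=False, features_only=True)(x)
print([f.shape[1] for f in feats])  # per feature_info: [96, 168, 1008, 2016, 4032]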
"""PyTorch SelecSLS Net example for ImageNet Classification License: CC BY 4.0 (https://creativecommons.org/licenses/by/4.0/legalcode) Author: Dushyant Mehta (@mehtadushy) SelecSLS (core) Network Architecture as proposed in "XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera, Mehta et al." https://arxiv.org/abs/1907.00837 Based on ResNet implementation in https://github.com/rwightman/pytorch-image-models and SelecSLS Net implementation in https://github.com/mehtadushy/SelecSLS-Pytorch """ from typing import List import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import create_classifier from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs __all__ = ['SelecSls'] # model_registry will add each entrypoint fn to this class SequentialList(nn.Sequential): def __init__(self, *args): super(SequentialList, self).__init__(*args) @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (List[torch.Tensor]) -> (List[torch.Tensor]) pass @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (torch.Tensor) -> (List[torch.Tensor]) pass def forward(self, x) -> List[torch.Tensor]: for module in self: x = module(x) return x class SelectSeq(nn.Module): def __init__(self, mode='index', index=0): super(SelectSeq, self).__init__() self.mode = mode self.index = index @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (List[torch.Tensor]) -> (torch.Tensor) pass @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (Tuple[torch.Tensor]) -> (torch.Tensor) pass def forward(self, x) -> torch.Tensor: if self.mode == 'index': return x[self.index] else: return torch.cat(x, dim=1) def conv_bn(in_chs, out_chs, k=3, stride=1, padding=None, dilation=1): if padding is None: padding = ((stride - 1) + dilation * (k - 1)) // 2 return nn.Sequential( nn.Conv2d(in_chs, out_chs, k, stride, padding=padding, dilation=dilation, bias=False), nn.BatchNorm2d(out_chs), nn.ReLU(inplace=True) ) class SelecSlsBlock(nn.Module): def __init__(self, in_chs, skip_chs, mid_chs, out_chs, is_first, stride, dilation=1): super(SelecSlsBlock, self).__init__() self.stride = stride self.is_first = is_first assert stride in [1, 2] # Process input with 4 conv blocks with the same number of input and output channels self.conv1 = conv_bn(in_chs, mid_chs, 3, stride, dilation=dilation) self.conv2 = conv_bn(mid_chs, mid_chs, 1) self.conv3 = conv_bn(mid_chs, mid_chs // 2, 3) self.conv4 = conv_bn(mid_chs // 2, mid_chs, 1) self.conv5 = conv_bn(mid_chs, mid_chs // 2, 3) self.conv6 = conv_bn(2 * mid_chs + (0 if is_first else skip_chs), out_chs, 1) def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]: if not isinstance(x, list): x = [x] assert len(x) in [1, 2] d1 = self.conv1(x[0]) d2 = self.conv3(self.conv2(d1)) d3 = self.conv5(self.conv4(d2)) if self.is_first: out = self.conv6(torch.cat([d1, d2, d3], 1)) return [out, out] else: return [self.conv6(torch.cat([d1, d2, d3, x[1]], 1)), x[1]] class SelecSls(nn.Module): """SelecSls42 / SelecSls60 / SelecSls84 Parameters ---------- cfg : network config dictionary specifying block type, feature, and head args num_classes : int, default 1000 Number of classification classes. in_chans : int, default 3 Number of input (color) channels. drop_rate : float, default 0. Dropout probability before classifier, for training global_pool : str, default 'avg' Global pooling type. 
One of 'avg', 'max', 'avgmax', 'catavgmax' """ def __init__(self, cfg, num_classes=1000, in_chans=3, drop_rate=0.0, global_pool='avg'): self.num_classes = num_classes super(SelecSls, self).__init__() self.stem = conv_bn(in_chans, 32, stride=2) self.features = SequentialList(*[cfg['block'](*block_args) for block_args in cfg['features']]) self.from_seq = SelectSeq() # from List[tensor] -> Tensor in module compatible way self.head = nn.Sequential(*[conv_bn(*conv_args) for conv_args in cfg['head']]) self.num_features = cfg['num_features'] self.feature_info = cfg['feature_info'] self.global_pool, self.head_drop, self.fc = create_classifier( self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate, ) for n, m in self.named_modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^stem', blocks=r'^features\.(\d+)', blocks_head=r'^head' ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self): return self.fc def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): x = self.stem(x) x = self.features(x) x = self.head(self.from_seq(x)) return x def forward_head(self, x, pre_logits: bool = False): x = self.global_pool(x) x = self.head_drop(x) return x if pre_logits else self.fc(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_selecsls(variant, pretrained, **kwargs): cfg = {} feature_info = [dict(num_chs=32, reduction=2, module='stem.2')] if variant.startswith('selecsls42'): cfg['block'] = SelecSlsBlock # Define configuration of the network after the initial neck cfg['features'] = [ # in_chs, skip_chs, mid_chs, out_chs, is_first, stride (32, 0, 64, 64, True, 2), (64, 64, 64, 128, False, 1), (128, 0, 144, 144, True, 2), (144, 144, 144, 288, False, 1), (288, 0, 304, 304, True, 2), (304, 304, 304, 480, False, 1), ] feature_info.extend([ dict(num_chs=128, reduction=4, module='features.1'), dict(num_chs=288, reduction=8, module='features.3'), dict(num_chs=480, reduction=16, module='features.5'), ]) # Head can be replaced with alternative configurations depending on the problem feature_info.append(dict(num_chs=1024, reduction=32, module='head.1')) if variant == 'selecsls42b': cfg['head'] = [ (480, 960, 3, 2), (960, 1024, 3, 1), (1024, 1280, 3, 2), (1280, 1024, 1, 1), ] feature_info.append(dict(num_chs=1024, reduction=64, module='head.3')) cfg['num_features'] = 1024 else: cfg['head'] = [ (480, 960, 3, 2), (960, 1024, 3, 1), (1024, 1024, 3, 2), (1024, 1280, 1, 1), ] feature_info.append(dict(num_chs=1280, reduction=64, module='head.3')) cfg['num_features'] = 1280 elif variant.startswith('selecsls60'): cfg['block'] = SelecSlsBlock # Define configuration of the network after the initial neck cfg['features'] = [ # in_chs, skip_chs, mid_chs, out_chs, is_first, stride (32, 0, 64, 64, True, 2), (64, 64, 64, 128, False, 1), (128, 0, 128, 128, True, 2), (128, 128, 128, 128, False, 1), (128, 128, 128, 288, False, 1), (288, 0, 288, 288, True, 2), (288, 288, 288, 288, False, 1), (288, 288, 288, 288, False, 1), (288, 288, 288, 416, False, 1), ] feature_info.extend([ dict(num_chs=128, reduction=4, module='features.1'), dict(num_chs=288, 
reduction=8, module='features.4'), dict(num_chs=416, reduction=16, module='features.8'), ]) # Head can be replaced with alternative configurations depending on the problem feature_info.append(dict(num_chs=1024, reduction=32, module='head.1')) if variant == 'selecsls60b': cfg['head'] = [ (416, 756, 3, 2), (756, 1024, 3, 1), (1024, 1280, 3, 2), (1280, 1024, 1, 1), ] feature_info.append(dict(num_chs=1024, reduction=64, module='head.3')) cfg['num_features'] = 1024 else: cfg['head'] = [ (416, 756, 3, 2), (756, 1024, 3, 1), (1024, 1024, 3, 2), (1024, 1280, 1, 1), ] feature_info.append(dict(num_chs=1280, reduction=64, module='head.3')) cfg['num_features'] = 1280 elif variant == 'selecsls84': cfg['block'] = SelecSlsBlock # Define configuration of the network after the initial neck cfg['features'] = [ # in_chs, skip_chs, mid_chs, out_chs, is_first, stride (32, 0, 64, 64, True, 2), (64, 64, 64, 144, False, 1), (144, 0, 144, 144, True, 2), (144, 144, 144, 144, False, 1), (144, 144, 144, 144, False, 1), (144, 144, 144, 144, False, 1), (144, 144, 144, 304, False, 1), (304, 0, 304, 304, True, 2), (304, 304, 304, 304, False, 1), (304, 304, 304, 304, False, 1), (304, 304, 304, 304, False, 1), (304, 304, 304, 304, False, 1), (304, 304, 304, 512, False, 1), ] feature_info.extend([ dict(num_chs=144, reduction=4, module='features.1'), dict(num_chs=304, reduction=8, module='features.6'), dict(num_chs=512, reduction=16, module='features.12'), ]) # Head can be replaced with alternative configurations depending on the problem cfg['head'] = [ (512, 960, 3, 2), (960, 1024, 3, 1), (1024, 1024, 3, 2), (1024, 1280, 3, 1), ] cfg['num_features'] = 1280 feature_info.extend([ dict(num_chs=1024, reduction=32, module='head.1'), dict(num_chs=1280, reduction=64, module='head.3') ]) else: raise ValueError('Invalid net configuration ' + variant + ' !!!') cfg['feature_info'] = feature_info # this model can do 6 feature levels by default, unlike most others, leave as 0-4 to avoid surprises? return build_model_with_cfg( SelecSls, variant, pretrained, model_cfg=cfg, feature_cfg=dict(out_indices=(0, 1, 2, 3, 4), flatten_sequential=True), **kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (4, 4), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.0', 'classifier': 'fc', **kwargs } default_cfgs = generate_default_cfgs({ 'selecsls42.untrained': _cfg( interpolation='bicubic'), 'selecsls42b.in1k': _cfg( hf_hub_id='timm/', interpolation='bicubic'), 'selecsls60.in1k': _cfg( hf_hub_id='timm/', interpolation='bicubic'), 'selecsls60b.in1k': _cfg( hf_hub_id='timm/', interpolation='bicubic'), 'selecsls84.untrained': _cfg( interpolation='bicubic'), }) @register_model def selecsls42(pretrained=False, **kwargs) -> SelecSls: """Constructs a SelecSls42 model. """ return _create_selecsls('selecsls42', pretrained, **kwargs) @register_model def selecsls42b(pretrained=False, **kwargs) -> SelecSls: """Constructs a SelecSls42_B model. """ return _create_selecsls('selecsls42b', pretrained, **kwargs) @register_model def selecsls60(pretrained=False, **kwargs) -> SelecSls: """Constructs a SelecSls60 model. """ return _create_selecsls('selecsls60', pretrained, **kwargs) @register_model def selecsls60b(pretrained=False, **kwargs) -> SelecSls: """Constructs a SelecSls60_B model. 
""" return _create_selecsls('selecsls60b', pretrained, **kwargs) @register_model def selecsls84(pretrained=False, **kwargs) -> SelecSls: """Constructs a SelecSls84 model. """ return _create_selecsls('selecsls84', pretrained, **kwargs)
pytorch-image-models/timm/models/selecsls.py/0
{ "file_path": "pytorch-image-models/timm/models/selecsls.py", "repo_id": "pytorch-image-models", "token_count": 6442 }
203
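A hedged example for the SelecSls entrypoints above (selecsls60b shown). The blocks pass List[Tensor] internally, but create_model returns a standard classifier; features_only exposes the five stages selected by out_indices from the cfg's feature_info.

import torch
import timm

model = timm.create_model('selecsls60b', pretrained=False)
x = torch.randn(1, 3, 224, 224)
print(model(x).shape)  # torch.Size([1, 1000])

feats = timm.create_model('selecsls60b', pretrained=False, features_only=True)(x)
print([f.shape[1] for f in feats])  # per feature_info: [32, 128, 288, 416, 1024]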
""" Vision Transformer (ViT) in PyTorch A PyTorch implement of Vision Transformers as described in: 'Exploring Plain Vision Transformer Backbones for Object Detection' - https://arxiv.org/abs/2203.16527 'Segment Anything Model (SAM)' - https://github.com/facebookresearch/segment-anything/ """ import logging from functools import partial from typing import Callable, Optional, Tuple import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint from torch.jit import Final from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD from timm.layers import PatchEmbed, Mlp, DropPath, PatchDropout, LayerNorm2d, ClassifierHead, NormMlpClassifierHead,\ Format, resample_abs_pos_embed_nhwc, RotaryEmbeddingCat, apply_rot_embed_cat, to_2tuple, use_fused_attn from ._builder import build_model_with_cfg from ._manipulate import checkpoint_seq from ._registry import generate_default_cfgs, register_model from ._features_fx import register_notrace_function # model_registry will add each entrypoint fn to this __all__ = ['VisionTransformerSAM'] _logger = logging.getLogger(__name__) def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor: """ Get relative positional embeddings according to the relative positions of query and key sizes. Args: q_size (int): size of query q. k_size (int): size of key k. rel_pos (Tensor): relative position embeddings (L, C). Returns: Extracted positional embeddings according to relative positions. """ max_rel_dist = int(2 * max(q_size, k_size) - 1) # Interpolate rel pos if needed. if rel_pos.shape[0] != max_rel_dist: # Interpolate rel pos. rel_pos_resized = F.interpolate( rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), size=max_rel_dist, mode="linear", ) rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0) else: rel_pos_resized = rel_pos # Scale the coords with short length if shapes for q and k are different. q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0) k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0) relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0) return rel_pos_resized[relative_coords.long()] register_notrace_function(get_rel_pos) def get_decomposed_rel_pos_bias( q: torch.Tensor, rel_pos_h: torch.Tensor, rel_pos_w: torch.Tensor, q_size: Tuple[int, int], k_size: Tuple[int, int], ) -> torch.Tensor: """ Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`. https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py Args: q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C). rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis. rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis. q_size (Tuple): spatial sequence size of query q with (q_h, q_w). k_size (Tuple): spatial sequence size of key k with (k_h, k_w). 
Returns: bias (Tensor): attention bias to add to attention map """ q_h, q_w = q_size k_h, k_w = k_size Rh = get_rel_pos(q_h, k_h, rel_pos_h) Rw = get_rel_pos(q_w, k_w, rel_pos_w) B, _, dim = q.shape r_q = q.reshape(B, q_h, q_w, dim) rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh) rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw) attn_bias = rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :] return attn_bias.reshape(-1, q_h * q_w, k_h * k_w) class Attention(nn.Module): fused_attn: Final[bool] def __init__( self, dim, num_heads=8, qkv_bias=True, qk_norm=False, attn_drop=0., proj_drop=0., norm_layer=nn.LayerNorm, use_rel_pos: bool = False, input_size: Optional[Tuple[int, int]] = None, rope: Optional[nn.Module] = None, ): super().__init__() assert dim % num_heads == 0, 'dim should be divisible by num_heads' self.num_heads = num_heads self.head_dim = dim // num_heads self.scale = self.head_dim ** -0.5 self.fused_attn = use_fused_attn() self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) self.use_rel_pos = use_rel_pos if self.use_rel_pos: assert rope is None assert ( input_size is not None ), "Input size must be provided if using relative positional encoding." # initialize relative positional embeddings self.rel_pos_h = nn.Parameter(torch.zeros( 2 * input_size[0] - 1, self.head_dim)) self.rel_pos_w = nn.Parameter(torch.zeros( 2 * input_size[1] - 1, self.head_dim)) self.rope = rope def forward(self, x): B, H, W, _ = x.shape N = H * W x = x.reshape(B, N, -1) qkv = self.qkv(x).view(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) # qkv with shape (3, B, nHead, H * W, C) q, k, v = qkv.reshape(3, B * self.num_heads, N, -1).unbind(0) # q, k, v with shape (B * nHead, H * W, C) q, k = self.q_norm(q), self.k_norm(k) if self.use_rel_pos: attn_bias = get_decomposed_rel_pos_bias(q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W)) else: attn_bias = None if self.rope is not None: rope = self.rope.get_embed() q = apply_rot_embed_cat(q, rope).type_as(v) k = apply_rot_embed_cat(k, rope).type_as(v) if self.fused_attn: x = torch.nn.functional.scaled_dot_product_attention( q, k, v, attn_mask=attn_bias, dropout_p=self.attn_drop.p if self.training else 0., ) else: q = q * self.scale attn = q @ k.transpose(-2, -1) if attn_bias is not None: attn = attn + attn_bias attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.view(B, self.num_heads, N, -1).transpose(1, 2).reshape(B, N, -1) x = self.proj(x) x = x.view(B, H, W, -1) return x class LayerScale(nn.Module): def __init__(self, dim, init_values=1e-5, inplace=False): super().__init__() self.inplace = inplace self.gamma = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x): return x.mul_(self.gamma) if self.inplace else x * self.gamma class Block(nn.Module): def __init__( self, dim, num_heads, mlp_ratio=4., qkv_bias=True, qk_norm=False, proj_drop=0., attn_drop=0., init_values=None, drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, mlp_layer=Mlp, use_rel_pos=False, window_size=0, input_size=None, rope=None, ): super().__init__() self.window_size = window_size self.norm1 = norm_layer(dim) self.attn = Attention( dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_norm=qk_norm, attn_drop=attn_drop, proj_drop=proj_drop, norm_layer=norm_layer, use_rel_pos=use_rel_pos, input_size=input_size if 
window_size == 0 else (window_size, window_size), rope=rope, ) self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = mlp_layer( in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop, ) self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() def forward(self, x): B, H, W, _ = x.shape shortcut = x x = self.norm1(x) # Window partition pad_hw: Optional[Tuple[int, int]] = None if self.window_size > 0: x, pad_hw = window_partition(x, self.window_size) x = self.drop_path1(self.ls1(self.attn(x))) # Reverse window partition if self.window_size > 0: x = window_unpartition(x, self.window_size, (H, W), pad_hw) x = shortcut + x x = x.reshape(B, H * W, -1) # MLP is faster for N, L, C tensor x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) x = x.reshape(B, H, W, -1) return x def window_partition(x: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]: """ Partition into non-overlapping windows with padding if needed. Args: x (tensor): input tokens with [B, H, W, C]. window_size (int): window size. Returns: windows: windows after partition with [B * num_windows, window_size, window_size, C]. (Hp, Wp): padded height and width before partition """ B, H, W, C = x.shape pad_h = (window_size - H % window_size) % window_size pad_w = (window_size - W % window_size) % window_size x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h)) Hp, Wp = H + pad_h, W + pad_w x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C) windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) return windows, (Hp, Wp) def window_unpartition( windows: torch.Tensor, window_size: int, hw: Tuple[int, int], pad_hw: Optional[Tuple[int, int]] = None, ) -> torch.Tensor: """ Window unpartition into original sequences and removing padding. Args: windows (tensor): input tokens with [B * num_windows, window_size, window_size, C]. window_size (int): window size. pad_hw (Tuple): padded height and width (Hp, Wp). hw (Tuple): original height and width (H, W) before padding. Returns: x: unpartitioned sequences with [B, H, W, C]. 
""" Hp, Wp = pad_hw if pad_hw is not None else hw H, W = hw B = windows.shape[0] // (Hp * Wp // window_size // window_size) x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1) x = x[:, :H, :W, :].contiguous() return x class VisionTransformerSAM(nn.Module): """ Vision Transformer for Segment-Anything Model(SAM) A PyTorch impl of : `Exploring Plain Vision Transformer Backbones for Object Detection` or `Segment Anything Model (SAM)` - https://arxiv.org/abs/2010.11929 """ def __init__( self, img_size: int = 1024, patch_size: int = 16, in_chans: int = 3, num_classes: int = 768, embed_dim: int = 768, depth: int = 12, num_heads: int = 12, mlp_ratio: float = 4., qkv_bias: bool = True, qk_norm: bool = False, init_values: Optional[float] = None, pre_norm: bool = False, drop_rate: float = 0., pos_drop_rate: float = 0., patch_drop_rate: float = 0., proj_drop_rate: float = 0., attn_drop_rate: float = 0., drop_path_rate: float = 0., weight_init: str = '', embed_layer: Callable = partial( PatchEmbed, output_fmt=Format.NHWC, strict_img_size=False), norm_layer: Optional[Callable] = nn.LayerNorm, act_layer: Optional[Callable] = nn.GELU, block_fn: Callable = Block, mlp_layer: Callable = Mlp, use_abs_pos: bool = True, use_rel_pos: bool = False, use_rope: bool = False, window_size: int = 14, global_attn_indexes: Tuple[int, ...] = (), neck_chans: int = 256, global_pool: str = 'avg', head_hidden_size: Optional[int] = None, ref_feat_shape: Optional[Tuple[Tuple[int, int], Tuple[int, int]]] = None ): """ Args: img_size: Input image size. patch_size: Patch size. in_chans: Number of image input channels. num_classes: Mumber of classes for classification head. global_pool: Type of global pooling for final sequence (default: 'token'). embed_dim: Transformer embedding dimension. depth: Depth of transformer. num_heads: Number of attention heads. mlp_ratio: Ratio of mlp hidden dim to embedding dim. qkv_bias: Enable bias for qkv projections if True. init_values: Layer-scale init values (layer-scale enabled if not None). drop_rate: Head dropout rate. pos_drop_rate: Position embedding dropout rate. attn_drop_rate: Attention dropout rate. drop_path_rate: Stochastic depth rate. weight_init: Weight initialization scheme. embed_layer: Patch embedding layer. norm_layer: Normalization layer. act_layer: MLP activation layer. block_fn: Transformer block layer. use_abs_pos: If True, use absolute positional embeddings. use_rel_pos: If True, add relative positional embeddings to the attention map. use_rope: If True, add rotary position embeddings to q/k in attention block. window_size: Window size for window attention blocks. If 0, not use window attention. global_attn_indexes: Indexes for blocks using global attention. Used when window_size > 0. global_pool: Global pooling type. 
head_hidden_size: If set, use NormMlpHead ref_feat_shape: Tuple of reference feature shapes for ROPE, (global, local) """ super().__init__() norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) act_layer = act_layer or nn.GELU self.num_classes = num_classes self.global_pool = global_pool # num_features for consistency with other models self.num_features = self.embed_dim = embed_dim self.grad_checkpointing = False self.patch_embed = embed_layer( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, bias=not pre_norm, # disable bias if pre-norm is used ) grid_size = self.patch_embed.grid_size if use_abs_pos: # Initialize absolute positional embedding with pretrain image size. self.pos_embed = nn.Parameter(torch.zeros(1, grid_size[0], grid_size[1], embed_dim)) else: self.pos_embed = None self.pos_drop = nn.Dropout(p=pos_drop_rate) if patch_drop_rate > 0: self.patch_drop = PatchDropout( patch_drop_rate, num_prefix_tokens=0, ) else: self.patch_drop = nn.Identity() self.norm_pre = norm_layer(embed_dim) if pre_norm else nn.Identity() if use_rope: assert not use_rel_pos, "ROPE and relative pos embeddings should not be enabled at same time" if ref_feat_shape is not None: assert len(ref_feat_shape) == 2 ref_feat_shape_global = to_2tuple(ref_feat_shape[0]) ref_feat_shape_window = to_2tuple(ref_feat_shape[1]) else: ref_feat_shape_global = ref_feat_shape_window = None self.rope_global = RotaryEmbeddingCat( embed_dim // num_heads, in_pixels=False, feat_shape=grid_size, ref_feat_shape=ref_feat_shape_global, ) self.rope_window = RotaryEmbeddingCat( embed_dim // num_heads, in_pixels=False, feat_shape=to_2tuple(window_size), ref_feat_shape=ref_feat_shape_window, ) else: self.rope_global = None self.rope_window = None # stochastic depth decay rule dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] self.blocks = nn.Sequential(*[ block_fn( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_norm=qk_norm, init_values=init_values, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer, mlp_layer=mlp_layer, use_rel_pos=use_rel_pos, window_size=window_size if i not in global_attn_indexes else 0, input_size=grid_size, rope=self.rope_window if i not in global_attn_indexes else self.rope_global, ) for i in range(depth)]) if neck_chans: self.neck = nn.Sequential( nn.Conv2d( embed_dim, neck_chans, kernel_size=1, bias=False, ), LayerNorm2d(neck_chans), nn.Conv2d( neck_chans, neck_chans, kernel_size=3, padding=1, bias=False, ), LayerNorm2d(neck_chans), ) self.num_features = neck_chans else: if head_hidden_size: self.neck = nn.Identity() else: # should have a final norm with standard ClassifierHead self.neck = LayerNorm2d(embed_dim) neck_chans = embed_dim # Classifier Head if head_hidden_size: self.head = NormMlpClassifierHead( neck_chans, num_classes, hidden_size=head_hidden_size, pool_type=global_pool, drop_rate=drop_rate, ) else: self.head = ClassifierHead( neck_chans, num_classes, pool_type=global_pool, drop_rate=drop_rate, ) @torch.jit.ignore def no_weight_decay(self): return {'pos_embed', 'dist_token'} @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^pos_embed|patch_embed', # stem and embed blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head def reset_classifier(self, num_classes=0, 
global_pool=None): self.head.reset(num_classes, global_pool) def forward_features(self, x): x = self.patch_embed(x) if self.pos_embed is not None: # dynamically resize abs pos embedding if needed x = x + resample_abs_pos_embed_nhwc(self.pos_embed, x.shape[1:3]) x = self.pos_drop(x) x = self.patch_drop(x) x = self.norm_pre(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) x = self.neck(x.permute(0, 3, 1, 2)) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=True) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn( state_dict, model, ): """ Remap SAM checkpoints -> timm """ sam_checkpoint = 'image_encoder.patch_embed.proj.weight' in state_dict out_dict = {} for k, v in state_dict.items(): if k.startswith('image_encoder.'): k = k[14:] k = k.replace('mlp.lin', 'mlp.fc') else: if sam_checkpoint: continue out_dict[k] = v return out_dict def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 1024, 1024), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head.fc', **kwargs } default_cfgs = generate_default_cfgs({ # Segment-Anyhing Model (SAM) pretrained - https://github.com/facebookresearch/segment-anything (no classifier head, for fine-tune/features only) 'samvit_base_patch16.sa1b': _cfg( url='https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth', hf_hub_id='timm/', license='apache-2.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 1024, 1024), crop_pct=1.0), 'samvit_large_patch16.sa1b': _cfg( url='https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth', hf_hub_id='timm/', license='apache-2.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 1024, 1024), crop_pct=1.0), 'samvit_huge_patch16.sa1b': _cfg( url='https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth', hf_hub_id='timm/', license='apache-2.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 1024, 1024), crop_pct=1.0), 'samvit_base_patch16_224': _cfg( mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=1000, input_size=(3, 224, 224), crop_pct=0.9), }) def _create_vision_transformer(variant, pretrained=False, **kwargs): if kwargs.get('features_only', None): raise RuntimeError( 'features_only not implemented for Vision Transformer models.') return build_model_with_cfg( VisionTransformerSAM, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, **kwargs, ) @register_model def samvit_base_patch16(pretrained=False, **kwargs) -> VisionTransformerSAM: """ ViT-B/16 for Segment-Anything """ model_args = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, global_attn_indexes=[2, 5, 8, 11], window_size=14, use_rel_pos=True, img_size=1024, ) model = _create_vision_transformer( 'samvit_base_patch16', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def samvit_large_patch16(pretrained=False, **kwargs) -> VisionTransformerSAM: """ ViT-L/16 for Segment-Anything """ model_args = dict( patch_size=16, embed_dim=1024, depth=24, num_heads=16, global_attn_indexes=[5, 11, 17, 23], window_size=14, use_rel_pos=True, img_size=1024, ) model = 
_create_vision_transformer( 'samvit_large_patch16', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def samvit_huge_patch16(pretrained=False, **kwargs) -> VisionTransformerSAM: """ ViT-H/16 for Segment-Anything """ model_args = dict( patch_size=16, embed_dim=1280, depth=32, num_heads=16, global_attn_indexes=[7, 15, 23, 31], window_size=14, use_rel_pos=True, img_size=1024, ) model = _create_vision_transformer( 'samvit_huge_patch16', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def samvit_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformerSAM: """ ViT-B/16 based on samvit arch """ model_args = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, global_attn_indexes=[2, 5, 8, 11], window_size=14, use_rel_pos=True, use_abs_pos=False, img_size=224, neck_chans=None, ) model = _create_vision_transformer( 'samvit_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model
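For a quick sanity check of the entrypoints registered above, here is a minimal usage sketch. It assumes a working `timm` install that includes this module; the output shape noted in the comment is what the default `patch_size=16` / `neck_chans=256` configuration implies, not a verified value.

```python
import torch
import timm

# pretrained=False avoids downloading the SA-1B checkpoint; set it to True to
# pull the 'samvit_base_patch16.sa1b' weights if you have network access.
model = timm.create_model('samvit_base_patch16', pretrained=False)
model.eval()

x = torch.randn(1, 3, 1024, 1024)  # default img_size for this variant
with torch.no_grad():
    feats = model.forward_features(x)

# With patch_size=16 and the default neck_chans=256, this should be roughly
# (1, 256, 64, 64) -- an NCHW feature map produced by the SAM neck.
print(feats.shape)
```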
pytorch-image-models/timm/models/vision_transformer_sam.py/0
{ "file_path": "pytorch-image-models/timm/models/vision_transformer_sam.py", "repo_id": "pytorch-image-models", "token_count": 12451 }
204
""" Lookahead Optimizer Wrapper. Implementation modified from: https://github.com/alphadl/lookahead.pytorch Paper: `Lookahead Optimizer: k steps forward, 1 step back` - https://arxiv.org/abs/1907.08610 Hacked together by / Copyright 2020 Ross Wightman """ from collections import OrderedDict from typing import Callable, Dict import torch from torch.optim.optimizer import Optimizer from collections import defaultdict class Lookahead(Optimizer): def __init__(self, base_optimizer, alpha=0.5, k=6): # NOTE super().__init__() not called on purpose self._optimizer_step_pre_hooks: Dict[int, Callable] = OrderedDict() self._optimizer_step_post_hooks: Dict[int, Callable] = OrderedDict() if not 0.0 <= alpha <= 1.0: raise ValueError(f'Invalid slow update rate: {alpha}') if not 1 <= k: raise ValueError(f'Invalid lookahead steps: {k}') defaults = dict(lookahead_alpha=alpha, lookahead_k=k, lookahead_step=0) self._base_optimizer = base_optimizer self.param_groups = base_optimizer.param_groups self.defaults = base_optimizer.defaults self.defaults.update(defaults) self.state = defaultdict(dict) # manually add our defaults to the param groups for name, default in defaults.items(): for group in self._base_optimizer.param_groups: group.setdefault(name, default) @torch.no_grad() def update_slow(self, group): for fast_p in group["params"]: if fast_p.grad is None: continue param_state = self._base_optimizer.state[fast_p] if 'lookahead_slow_buff' not in param_state: param_state['lookahead_slow_buff'] = torch.empty_like(fast_p) param_state['lookahead_slow_buff'].copy_(fast_p) slow = param_state['lookahead_slow_buff'] slow.add_(fast_p - slow, alpha=group['lookahead_alpha']) fast_p.copy_(slow) def sync_lookahead(self): for group in self._base_optimizer.param_groups: self.update_slow(group) @torch.no_grad() def step(self, closure=None): loss = self._base_optimizer.step(closure) for group in self._base_optimizer.param_groups: group['lookahead_step'] += 1 if group['lookahead_step'] % group['lookahead_k'] == 0: self.update_slow(group) return loss def state_dict(self): return self._base_optimizer.state_dict() def load_state_dict(self, state_dict): self._base_optimizer.load_state_dict(state_dict) self.param_groups = self._base_optimizer.param_groups
pytorch-image-models/timm/optim/lookahead.py/0
{ "file_path": "pytorch-image-models/timm/optim/lookahead.py", "repo_id": "pytorch-image-models", "token_count": 1134 }
205
""" Scheduler Factory Hacked together by / Copyright 2021 Ross Wightman """ from typing import List, Optional, Union from torch.optim import Optimizer from .cosine_lr import CosineLRScheduler from .multistep_lr import MultiStepLRScheduler from .plateau_lr import PlateauLRScheduler from .poly_lr import PolyLRScheduler from .step_lr import StepLRScheduler from .tanh_lr import TanhLRScheduler def scheduler_kwargs(cfg, decreasing_metric: Optional[bool] = None): """ cfg/argparse to kwargs helper Convert scheduler args in argparse args or cfg (.dot) like object to keyword args. """ eval_metric = getattr(cfg, 'eval_metric', 'top1') if decreasing_metric is not None: plateau_mode = 'min' if decreasing_metric else 'max' else: plateau_mode = 'min' if 'loss' in eval_metric else 'max' kwargs = dict( sched=cfg.sched, num_epochs=getattr(cfg, 'epochs', 100), decay_epochs=getattr(cfg, 'decay_epochs', 30), decay_milestones=getattr(cfg, 'decay_milestones', [30, 60]), warmup_epochs=getattr(cfg, 'warmup_epochs', 5), cooldown_epochs=getattr(cfg, 'cooldown_epochs', 0), patience_epochs=getattr(cfg, 'patience_epochs', 10), decay_rate=getattr(cfg, 'decay_rate', 0.1), min_lr=getattr(cfg, 'min_lr', 0.), warmup_lr=getattr(cfg, 'warmup_lr', 1e-5), warmup_prefix=getattr(cfg, 'warmup_prefix', False), noise=getattr(cfg, 'lr_noise', None), noise_pct=getattr(cfg, 'lr_noise_pct', 0.67), noise_std=getattr(cfg, 'lr_noise_std', 1.), noise_seed=getattr(cfg, 'seed', 42), cycle_mul=getattr(cfg, 'lr_cycle_mul', 1.), cycle_decay=getattr(cfg, 'lr_cycle_decay', 0.1), cycle_limit=getattr(cfg, 'lr_cycle_limit', 1), k_decay=getattr(cfg, 'lr_k_decay', 1.0), plateau_mode=plateau_mode, step_on_epochs=not getattr(cfg, 'sched_on_updates', False), ) return kwargs def create_scheduler( args, optimizer: Optimizer, updates_per_epoch: int = 0, ): return create_scheduler_v2( optimizer=optimizer, **scheduler_kwargs(args), updates_per_epoch=updates_per_epoch, ) def create_scheduler_v2( optimizer: Optimizer, sched: str = 'cosine', num_epochs: int = 300, decay_epochs: int = 90, decay_milestones: List[int] = (90, 180, 270), cooldown_epochs: int = 0, patience_epochs: int = 10, decay_rate: float = 0.1, min_lr: float = 0, warmup_lr: float = 1e-5, warmup_epochs: int = 0, warmup_prefix: bool = False, noise: Union[float, List[float]] = None, noise_pct: float = 0.67, noise_std: float = 1., noise_seed: int = 42, cycle_mul: float = 1., cycle_decay: float = 0.1, cycle_limit: int = 1, k_decay: float = 1.0, plateau_mode: str = 'max', step_on_epochs: bool = True, updates_per_epoch: int = 0, ): t_initial = num_epochs warmup_t = warmup_epochs decay_t = decay_epochs cooldown_t = cooldown_epochs if not step_on_epochs: assert updates_per_epoch > 0, 'updates_per_epoch must be set to number of dataloader batches' t_initial = t_initial * updates_per_epoch warmup_t = warmup_t * updates_per_epoch decay_t = decay_t * updates_per_epoch decay_milestones = [d * updates_per_epoch for d in decay_milestones] cooldown_t = cooldown_t * updates_per_epoch # warmup args warmup_args = dict( warmup_lr_init=warmup_lr, warmup_t=warmup_t, warmup_prefix=warmup_prefix, ) # setup noise args for supporting schedulers if noise is not None: if isinstance(noise, (list, tuple)): noise_range = [n * t_initial for n in noise] if len(noise_range) == 1: noise_range = noise_range[0] else: noise_range = noise * t_initial else: noise_range = None noise_args = dict( noise_range_t=noise_range, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, ) # setup cycle args for supporting schedulers 
cycle_args = dict( cycle_mul=cycle_mul, cycle_decay=cycle_decay, cycle_limit=cycle_limit, ) lr_scheduler = None if sched == 'cosine': lr_scheduler = CosineLRScheduler( optimizer, t_initial=t_initial, lr_min=min_lr, t_in_epochs=step_on_epochs, **cycle_args, **warmup_args, **noise_args, k_decay=k_decay, ) elif sched == 'tanh': lr_scheduler = TanhLRScheduler( optimizer, t_initial=t_initial, lr_min=min_lr, t_in_epochs=step_on_epochs, **cycle_args, **warmup_args, **noise_args, ) elif sched == 'step': lr_scheduler = StepLRScheduler( optimizer, decay_t=decay_t, decay_rate=decay_rate, t_in_epochs=step_on_epochs, **warmup_args, **noise_args, ) elif sched == 'multistep': lr_scheduler = MultiStepLRScheduler( optimizer, decay_t=decay_milestones, decay_rate=decay_rate, t_in_epochs=step_on_epochs, **warmup_args, **noise_args, ) elif sched == 'plateau': assert step_on_epochs, 'Plateau LR only supports step per epoch.' warmup_args.pop('warmup_prefix', False) lr_scheduler = PlateauLRScheduler( optimizer, decay_rate=decay_rate, patience_t=patience_epochs, cooldown_t=0, **warmup_args, lr_min=min_lr, mode=plateau_mode, **noise_args, ) elif sched == 'poly': lr_scheduler = PolyLRScheduler( optimizer, power=decay_rate, # overloading 'decay_rate' as polynomial power t_initial=t_initial, lr_min=min_lr, t_in_epochs=step_on_epochs, k_decay=k_decay, **cycle_args, **warmup_args, **noise_args, ) if hasattr(lr_scheduler, 'get_cycle_length'): # for cycle based schedulers (cosine, tanh, poly) recalculate total epochs w/ cycles & cooldown t_with_cycles_and_cooldown = lr_scheduler.get_cycle_length() + cooldown_t if step_on_epochs: num_epochs = t_with_cycles_and_cooldown else: num_epochs = t_with_cycles_and_cooldown // updates_per_epoch return lr_scheduler, num_epochs
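A minimal sketch of driving the factory directly is shown below. The constructor arguments come from the signature above; the per-epoch `scheduler.step(epoch)` call is assumed from the usual timm scheduler API rather than shown in this file.

```python
import torch
import torch.nn as nn
from timm.scheduler.scheduler_factory import create_scheduler_v2

model = nn.Linear(10, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)

# Cosine schedule stepped per epoch; num_epochs comes back adjusted for
# cycles + cooldown via get_cycle_length().
scheduler, num_epochs = create_scheduler_v2(
    optimizer,
    sched='cosine',
    num_epochs=100,
    warmup_epochs=5,
    min_lr=1e-6,
)

for epoch in range(num_epochs):
    # ... train one epoch ...
    # timm schedulers are stepped explicitly with the upcoming epoch index.
    scheduler.step(epoch + 1)
```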
pytorch-image-models/timm/scheduler/scheduler_factory.py/0
{ "file_path": "pytorch-image-models/timm/scheduler/scheduler_factory.py", "repo_id": "pytorch-image-models", "token_count": 3467 }
206
from typing import Optional, Tuple, List import torch def onnx_forward(onnx_file, example_input): import onnxruntime sess_options = onnxruntime.SessionOptions() session = onnxruntime.InferenceSession(onnx_file, sess_options) input_name = session.get_inputs()[0].name output = session.run([], {input_name: example_input.numpy()}) output = output[0] return output def onnx_export( model: torch.nn.Module, output_file: str, example_input: Optional[torch.Tensor] = None, training: bool = False, verbose: bool = False, check: bool = True, check_forward: bool = False, batch_size: int = 64, input_size: Tuple[int, int, int] = None, opset: Optional[int] = None, dynamic_size: bool = False, aten_fallback: bool = False, keep_initializers: Optional[bool] = None, use_dynamo: bool = False, input_names: List[str] = None, output_names: List[str] = None, ): import onnx if training: training_mode = torch.onnx.TrainingMode.TRAINING model.train() else: training_mode = torch.onnx.TrainingMode.EVAL model.eval() if example_input is None: if not input_size: assert hasattr(model, 'default_cfg') input_size = model.default_cfg.get('input_size') example_input = torch.randn((batch_size,) + input_size, requires_grad=training) # Run model once before export trace, sets padding for models with Conv2dSameExport. This means # that the padding for models with Conv2dSameExport (most models with tf_ prefix) is fixed for # the input img_size specified in this script. # Opset >= 11 should allow for dynamic padding, however I cannot get it to work due to # issues in the tracing of the dynamic padding or errors attempting to export the model after jit # scripting it (an approach that should work). Perhaps in a future PyTorch or ONNX versions... with torch.no_grad(): original_out = model(example_input) input_names = input_names or ["input0"] output_names = output_names or ["output0"] dynamic_axes = {'input0': {0: 'batch'}, 'output0': {0: 'batch'}} if dynamic_size: dynamic_axes['input0'][2] = 'height' dynamic_axes['input0'][3] = 'width' if aten_fallback: export_type = torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK else: export_type = torch.onnx.OperatorExportTypes.ONNX if use_dynamo: export_options = torch.onnx.ExportOptions(dynamic_shapes=dynamic_size) export_output = torch.onnx.dynamo_export( model, example_input, export_options=export_options, ) export_output.save(output_file) torch_out = None else: torch_out = torch.onnx._export( model, example_input, output_file, training=training_mode, export_params=True, verbose=verbose, input_names=input_names, output_names=output_names, keep_initializers_as_inputs=keep_initializers, dynamic_axes=dynamic_axes, opset_version=opset, operator_export_type=export_type ) if check: onnx_model = onnx.load(output_file) onnx.checker.check_model(onnx_model, full_check=True) # assuming throw on error if check_forward and not training: import numpy as np onnx_out = onnx_forward(output_file, example_input) if torch_out is not None: np.testing.assert_almost_equal(torch_out.numpy(), onnx_out, decimal=3) np.testing.assert_almost_equal(original_out.numpy(), torch_out.numpy(), decimal=5) else: np.testing.assert_almost_equal(original_out.numpy(), onnx_out, decimal=3)
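For reference, a hedged sketch of calling this helper on an ordinary timm model follows. It assumes the `onnx` package is installed (and `onnxruntime` as well, since `check_forward=True` runs the exported graph); the opset value is just a reasonable guess to adjust for your runtime.

```python
import torch
import timm
from timm.utils.onnx import onnx_export

model = timm.create_model('resnet18', pretrained=False).eval()
example = torch.randn(2, 3, 224, 224)

onnx_export(
    model,
    'resnet18.onnx',
    example_input=example,
    opset=17,            # assumed; pick what your ONNX runtime supports
    dynamic_size=False,
    check=True,          # runs onnx.checker on the exported graph
    check_forward=True,  # compares ONNX runtime output against the torch output
)
```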
pytorch-image-models/timm/utils/onnx.py/0
{ "file_path": "pytorch-image-models/timm/utils/onnx.py", "repo_id": "pytorch-image-models", "token_count": 1722 }
207
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.5.0
    hooks:
      - id: check-yaml
      - id: end-of-file-fixer
      - id: trailing-whitespace
        exclude: docs/source/basic_tutorials/launcher.md
  - repo: https://github.com/psf/black
    rev: 24.2.0
    hooks:
      - id: black
  - repo: https://github.com/doublify/pre-commit-rust
    rev: v1.0
    hooks:
      - id: fmt
      - id: cargo-check
      - id: clippy
text-generation-inference/.pre-commit-config.yaml/0
{ "file_path": "text-generation-inference/.pre-commit-config.yaml", "repo_id": "text-generation-inference", "token_count": 226 }
208
/// Text Generation Inference benchmarking tool /// /// Inspired by the great Oha app: https://github.com/hatoo/oha /// and: https://github.com/orhun/rust-tui-template use clap::Parser; use std::path::Path; use text_generation_client::ShardedClient; use tokenizers::{FromPretrainedParameters, Tokenizer}; use tracing_subscriber::layer::SubscriberExt; use tracing_subscriber::util::SubscriberInitExt; use tracing_subscriber::EnvFilter; /// App Configuration #[derive(Parser, Debug)] #[clap(author, version, about, long_about = None)] struct Args { /// The name of the tokenizer (as in model_id on the huggingface hub, or local path). #[clap(short, long, env)] tokenizer_name: String, /// The revision to use for the tokenizer if on the hub. #[clap(default_value = "main", long, env)] revision: String, /// The various batch sizes to benchmark for, the idea is to get enough /// batching to start seeing increased latency, this usually means you're /// moving from memory bound (usual as BS=1) to compute bound, and this is /// a sweet spot for the maximum batch size for the model under test #[clap(short, long)] batch_size: Option<Vec<u32>>, /// This is the initial prompt sent to the text-generation-server length /// in token. Longer prompt will slow down the benchmark. Usually the /// latency grows somewhat linearly with this for the prefill step. /// /// Most importantly, the prefill step is usually not the one dominating /// your runtime, so it's ok to keep it short. #[clap(default_value = "10", short, long, env)] sequence_length: u32, /// This is how many tokens will be generated by the server and averaged out /// to give the `decode` latency. This is the *critical* number you want to optimize for /// LLM spend most of their time doing decoding. /// /// Decode latency is usually quite stable. #[clap(default_value = "8", short, long, env)] decode_length: u32, ///How many runs should we average from #[clap(default_value = "10", short, long, env)] runs: usize, /// Number of warmup cycles #[clap(default_value = "1", short, long, env)] warmups: usize, /// The location of the grpc socket. 
This benchmark tool bypasses the router /// completely and directly talks to the gRPC processes #[clap(default_value = "/tmp/text-generation-server-0", short, long, env)] master_shard_uds_path: String, /// Generation parameter in case you want to specifically test/debug particular /// decoding strategies, for full doc refer to the `text-generation-server` #[clap(long, env)] temperature: Option<f32>, /// Generation parameter in case you want to specifically test/debug particular /// decoding strategies, for full doc refer to the `text-generation-server` #[clap(long, env)] top_k: Option<u32>, /// Generation parameter in case you want to specifically test/debug particular /// decoding strategies, for full doc refer to the `text-generation-server` #[clap(long, env)] top_p: Option<f32>, /// Generation parameter in case you want to specifically test/debug particular /// decoding strategies, for full doc refer to the `text-generation-server` #[clap(long, env)] typical_p: Option<f32>, /// Generation parameter in case you want to specifically test/debug particular /// decoding strategies, for full doc refer to the `text-generation-server` #[clap(long, env)] repetition_penalty: Option<f32>, /// Generation parameter in case you want to specifically test/debug particular /// decoding strategies, for full doc refer to the `text-generation-server` #[clap(long, env)] frequency_penalty: Option<f32>, /// Generation parameter in case you want to specifically test/debug particular /// decoding strategies, for full doc refer to the `text-generation-server` #[clap(long, env)] watermark: bool, /// Generation parameter in case you want to specifically test/debug particular /// decoding strategies, for full doc refer to the `text-generation-server` #[clap(long, env)] do_sample: bool, /// Generation parameter in case you want to specifically test/debug particular /// decoding strategies, for full doc refer to the `text-generation-server` #[clap(long, env)] top_n_tokens: Option<u32>, } fn main() -> Result<(), Box<dyn std::error::Error>> { init_logging(); // Get args let args = Args::parse(); // Pattern match configuration let Args { tokenizer_name, revision, batch_size, sequence_length, decode_length, runs, warmups, temperature, top_k, top_p, typical_p, repetition_penalty, frequency_penalty, watermark, do_sample, master_shard_uds_path, top_n_tokens, } = args; let batch_size = batch_size.unwrap_or(vec![1, 2, 4, 8, 16, 32]); // Tokenizer instance // This will only be used to validate payloads tracing::info!("Loading tokenizer"); let local_path = Path::new(&tokenizer_name); let tokenizer = if local_path.exists() && local_path.is_dir() && local_path.join("tokenizer.json").exists() { // Load local tokenizer tracing::info!("Found local tokenizer"); Tokenizer::from_file(local_path.join("tokenizer.json")).unwrap() } else { tracing::info!("Downloading tokenizer"); // Parse Huggingface hub token let auth_token = std::env::var("HUGGING_FACE_HUB_TOKEN").ok(); // Download and instantiate tokenizer // We need to download it outside of the Tokio runtime let params = FromPretrainedParameters { revision, auth_token, ..Default::default() }; Tokenizer::from_pretrained(tokenizer_name.clone(), Some(params)).unwrap() }; tracing::info!("Tokenizer loaded"); // Launch Tokio runtime tokio::runtime::Builder::new_multi_thread() .enable_all() .build() .unwrap() .block_on(async { // Instantiate sharded client from the master unix socket tracing::info!("Connect to model server"); let mut sharded_client = 
ShardedClient::connect_uds(master_shard_uds_path) .await .expect("Could not connect to server"); // Clear the cache; useful if the webserver rebooted sharded_client .clear_cache(None) .await .expect("Unable to clear cache"); tracing::info!("Connected"); // Run app text_generation_benchmark::run( tokenizer_name, tokenizer, batch_size, sequence_length, decode_length, top_n_tokens, runs, warmups, temperature, top_k, top_p, typical_p, repetition_penalty, frequency_penalty, watermark, do_sample, sharded_client, ) .await .unwrap(); }); Ok(()) } /// Init logging using LOG_LEVEL fn init_logging() { // STDOUT/STDERR layer let fmt_layer = tracing_subscriber::fmt::layer() .with_file(true) .with_line_number(true); // Filter events with LOG_LEVEL let env_filter = EnvFilter::try_from_env("LOG_LEVEL").unwrap_or_else(|_| EnvFilter::new("info")); tracing_subscriber::registry() .with(env_filter) .with(fmt_layer) .init(); }
text-generation-inference/benchmark/src/main.rs/0
{ "file_path": "text-generation-inference/benchmark/src/main.rs", "repo_id": "text-generation-inference", "token_count": 3113 }
209
import os import requests from typing import Dict, Optional, List from huggingface_hub.utils import build_hf_headers from text_generation import Client, AsyncClient, __version__ from text_generation.types import DeployedModel from text_generation.errors import NotSupportedError, parse_error INFERENCE_ENDPOINT = os.environ.get( "HF_INFERENCE_ENDPOINT", "https://api-inference.huggingface.co" ) def deployed_models(headers: Optional[Dict] = None) -> List[DeployedModel]: """ Get all currently deployed models with text-generation-inference-support Returns: List[DeployedModel]: list of all currently deployed models """ resp = requests.get( f"https://api-inference.huggingface.co/framework/text-generation-inference", headers=headers, timeout=5, ) payload = resp.json() if resp.status_code != 200: raise parse_error(resp.status_code, payload) models = [DeployedModel(**raw_deployed_model) for raw_deployed_model in payload] return models def check_model_support(repo_id: str, headers: Optional[Dict] = None) -> bool: """ Check if a given model is supported by text-generation-inference Returns: bool: whether the model is supported by this client """ resp = requests.get( f"https://api-inference.huggingface.co/status/{repo_id}", headers=headers, timeout=5, ) payload = resp.json() if resp.status_code != 200: raise parse_error(resp.status_code, payload) framework = payload["framework"] supported = framework == "text-generation-inference" return supported class InferenceAPIClient(Client): """Client to make calls to the HuggingFace Inference API. Only supports a subset of the available text-generation or text2text-generation models that are served using text-generation-inference Example: ```python >>> from text_generation import InferenceAPIClient >>> client = InferenceAPIClient("bigscience/bloomz") >>> client.generate("Why is the sky blue?").generated_text ' Rayleigh scattering' >>> result = "" >>> for response in client.generate_stream("Why is the sky blue?"): >>> if not response.token.special: >>> result += response.token.text >>> result ' Rayleigh scattering' ``` """ def __init__(self, repo_id: str, token: Optional[str] = None, timeout: int = 10): """ Init headers and API information Args: repo_id (`str`): Id of repository (e.g. `bigscience/bloom`). token (`str`, `optional`): The API token to use as HTTP bearer authorization. This is not the authentication token. You can find the token in https://huggingface.co/settings/token. Alternatively, you can find both your organizations and personal API tokens using `HfApi().whoami(token)`. timeout (`int`): Timeout in seconds """ headers = build_hf_headers( token=token, library_name="text-generation", library_version=__version__ ) # Text Generation Inference client only supports a subset of the available hub models if not check_model_support(repo_id, headers): raise NotSupportedError(repo_id) base_url = f"{INFERENCE_ENDPOINT}/models/{repo_id}" super(InferenceAPIClient, self).__init__( base_url, headers=headers, timeout=timeout ) class InferenceAPIAsyncClient(AsyncClient): """Aynschronous Client to make calls to the HuggingFace Inference API. 
Only supports a subset of the available text-generation or text2text-generation models that are served using text-generation-inference Example: ```python >>> from text_generation import InferenceAPIAsyncClient >>> client = InferenceAPIAsyncClient("bigscience/bloomz") >>> response = await client.generate("Why is the sky blue?") >>> response.generated_text ' Rayleigh scattering' >>> result = "" >>> async for response in client.generate_stream("Why is the sky blue?"): >>> if not response.token.special: >>> result += response.token.text >>> result ' Rayleigh scattering' ``` """ def __init__(self, repo_id: str, token: Optional[str] = None, timeout: int = 10): """ Init headers and API information Args: repo_id (`str`): Id of repository (e.g. `bigscience/bloom`). token (`str`, `optional`): The API token to use as HTTP bearer authorization. This is not the authentication token. You can find the token in https://huggingface.co/settings/token. Alternatively, you can find both your organizations and personal API tokens using `HfApi().whoami(token)`. timeout (`int`): Timeout in seconds """ headers = build_hf_headers( token=token, library_name="text-generation", library_version=__version__ ) # Text Generation Inference client only supports a subset of the available hub models if not check_model_support(repo_id, headers): raise NotSupportedError(repo_id) base_url = f"{INFERENCE_ENDPOINT}/models/{repo_id}" super(InferenceAPIAsyncClient, self).__init__( base_url, headers=headers, timeout=timeout )
text-generation-inference/clients/python/text_generation/inference_api.py/0
{ "file_path": "text-generation-inference/clients/python/text_generation/inference_api.py", "repo_id": "text-generation-inference", "token_count": 2183 }
210
## Speculation

Speculative decoding, assisted generation, Medusa, and others are different names for the same idea: generate candidate tokens *before* the large model actually runs, then only *check* whether those tokens were valid. You do *more* computation per LLM pass, but when the guesses are correct you produce 1, 2, 3, etc. tokens in a single pass. Since LLMs are usually memory bound (not compute bound), inference becomes 2-3x faster provided the guesses are correct often enough (it can be much more for code-oriented tasks, for instance). You can find a more [detailed explanation](https://huggingface.co/blog/assisted-generation).

Text Generation Inference supports 2 main speculative methods:

- Medusa
- N-gram

### Medusa

Medusa is a [simple method](https://arxiv.org/abs/2401.10774) to create many tokens in a single pass using fine-tuned LM heads in addition to your existing model. You can check a few existing fine-tunes for popular models:

- [text-generation-inference/gemma-7b-it-medusa](https://huggingface.co/text-generation-inference/gemma-7b-it-medusa)
- [text-generation-inference/Mixtral-8x7B-Instruct-v0.1-medusa](https://huggingface.co/text-generation-inference/Mixtral-8x7B-Instruct-v0.1-medusa)
- [text-generation-inference/Mistral-7B-Instruct-v0.2-medusa](https://huggingface.co/text-generation-inference/Mistral-7B-Instruct-v0.2-medusa)

In order to create your own Medusa heads for your own fine-tune, you should check out the original Medusa repo: [https://github.com/FasterDecoding/Medusa](https://github.com/FasterDecoding/Medusa)

In order to use Medusa models in TGI, simply point to a Medusa-enabled model, and everything will load automatically.

### N-gram

If you don't have a Medusa model, or don't have the resources to fine-tune one, you can try `n-gram` speculation. N-gram works by looking for existing tokens earlier in the sequence that match the current context and using those as the speculation. This is an extremely simple method that works best for code or highly repetitive text, and it may not help if the speculation misses too often.

In order to enable n-gram speculation, simply use `--speculate 2` in your flags. [Details about the flag](https://huggingface.co/docs/text-generation-inference/basic_tutorials/launcher#speculate)
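If you want to try speculation end to end, a minimal sketch using this project's Python client is shown below. It assumes a TGI server is already running (for example, launched with `--speculate 2` or pointed at a Medusa-enabled model); the address and prompt are hypothetical placeholders.

```python
from text_generation import Client

# Hypothetical local endpoint; point this at wherever your TGI server is listening.
client = Client("http://127.0.0.1:8080")

response = client.generate(
    "def fibonacci(n):",  # code-like, repetitive prompts tend to benefit most from n-gram
    max_new_tokens=64,
)
print(response.generated_text)
```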
text-generation-inference/docs/source/conceptual/speculation.md/0
{ "file_path": "text-generation-inference/docs/source/conceptual/speculation.md", "repo_id": "text-generation-inference", "token_count": 671 }
211
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -8.6875, "text": "Test" }, { "id": 2009, "logprob": -11.546875, "text": "request" } ], "seed": null, "tokens": [ { "id": 363, "logprob": -1.5351562, "special": false, "text": " for" }, { "id": 847, "logprob": -2.5566406, "special": false, "text": " /" }, { "id": 2754, "logprob": -2.2519531, "special": false, "text": "api" }, { "id": 29914, "logprob": -0.03414917, "special": false, "text": "/" }, { "id": 29894, "logprob": -0.96240234, "special": false, "text": "v" }, { "id": 29896, "logprob": -0.3647461, "special": false, "text": "1" }, { "id": 29914, "logprob": -0.012901306, "special": false, "text": "/" }, { "id": 16418, "logprob": -3.1542969, "special": false, "text": "projects" }, { "id": 29914, "logprob": -0.4362793, "special": false, "text": "/" }, { "id": 29896, "logprob": -1.9394531, "special": false, "text": "1" } ], "top_tokens": null }, "generated_text": " for /api/v1/projects/1" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -8.6875, "text": "Test" }, { "id": 2009, "logprob": -11.546875, "text": "request" } ], "seed": null, "tokens": [ { "id": 363, "logprob": -1.5332031, "special": false, "text": " for" }, { "id": 847, "logprob": -2.5625, "special": false, "text": " /" }, { "id": 2754, "logprob": -2.2617188, "special": false, "text": "api" }, { "id": 29914, "logprob": -0.033996582, "special": false, "text": "/" }, { "id": 29894, "logprob": -0.9609375, "special": false, "text": "v" }, { "id": 29896, "logprob": -0.36572266, "special": false, "text": "1" }, { "id": 29914, "logprob": -0.0129776, "special": false, "text": "/" }, { "id": 16418, "logprob": -3.15625, "special": false, "text": "projects" }, { "id": 29914, "logprob": -0.4362793, "special": false, "text": "/" }, { "id": 29896, "logprob": -1.9394531, "special": false, "text": "1" } ], "top_tokens": null }, "generated_text": " for /api/v1/projects/1" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -8.6875, "text": "Test" }, { "id": 2009, "logprob": -11.546875, "text": "request" } ], "seed": null, "tokens": [ { "id": 363, "logprob": -1.5332031, "special": false, "text": " for" }, { "id": 847, "logprob": -2.5625, "special": false, "text": " /" }, { "id": 2754, "logprob": -2.2617188, "special": false, "text": "api" }, { "id": 29914, "logprob": -0.033996582, "special": false, "text": "/" }, { "id": 29894, "logprob": -0.9609375, "special": false, "text": "v" }, { "id": 29896, "logprob": -0.36572266, "special": false, "text": "1" }, { "id": 29914, "logprob": -0.0129776, "special": false, "text": "/" }, { "id": 16418, "logprob": -3.15625, "special": false, "text": "projects" }, { "id": 29914, "logprob": -0.4362793, "special": false, "text": "/" }, { "id": 29896, "logprob": -1.9394531, "special": false, "text": "1" } ], "top_tokens": null }, "generated_text": " for /api/v1/projects/1" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -8.6875, "text": "Test" }, { "id": 2009, "logprob": -11.546875, "text": "request" } ], "seed": null, "tokens": [ { "id": 363, "logprob": 
-1.5332031, "special": false, "text": " for" }, { "id": 847, "logprob": -2.5625, "special": false, "text": " /" }, { "id": 2754, "logprob": -2.2617188, "special": false, "text": "api" }, { "id": 29914, "logprob": -0.033996582, "special": false, "text": "/" }, { "id": 29894, "logprob": -0.9609375, "special": false, "text": "v" }, { "id": 29896, "logprob": -0.36572266, "special": false, "text": "1" }, { "id": 29914, "logprob": -0.0129776, "special": false, "text": "/" }, { "id": 16418, "logprob": -3.15625, "special": false, "text": "projects" }, { "id": 29914, "logprob": -0.4362793, "special": false, "text": "/" }, { "id": 29896, "logprob": -1.9394531, "special": false, "text": "1" } ], "top_tokens": null }, "generated_text": " for /api/v1/projects/1" } ]
text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama/test_flash_llama_load.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama/test_flash_llama_load.json", "repo_id": "text-generation-inference", "token_count": 4901 }
212
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 14402, "logprob": null, "text": "Test" }, { "id": 2581, "logprob": -11.6171875, "text": " request" } ], "seed": null, "tokens": [ { "id": 25, "logprob": -2.3203125, "special": false, "text": ":" }, { "id": 1391, "logprob": -0.98779297, "special": false, "text": " {" }, { "id": 25927, "logprob": -0.7729492, "special": false, "text": "request" }, { "id": 92, "logprob": -0.7241211, "special": false, "text": "}" }, { "id": 4943, "logprob": -0.4091797, "special": false, "text": "\")" }, { "id": 198, "logprob": -0.119018555, "special": false, "text": "\n" }, { "id": 50280, "logprob": -0.9707031, "special": false, "text": " " }, { "id": 26209, "logprob": -1.4414062, "special": false, "text": "response" }, { "id": 796, "logprob": -0.056854248, "special": false, "text": " =" }, { "id": 2116, "logprob": -1.1533203, "special": false, "text": " self" } ], "top_tokens": null }, "generated_text": ": {request}\")\n response = self" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 14402, "logprob": null, "text": "Test" }, { "id": 2581, "logprob": -11.6171875, "text": " request" } ], "seed": null, "tokens": [ { "id": 25, "logprob": -2.3203125, "special": false, "text": ":" }, { "id": 1391, "logprob": -0.98779297, "special": false, "text": " {" }, { "id": 25927, "logprob": -0.7729492, "special": false, "text": "request" }, { "id": 92, "logprob": -0.7241211, "special": false, "text": "}" }, { "id": 4943, "logprob": -0.4091797, "special": false, "text": "\")" }, { "id": 198, "logprob": -0.119018555, "special": false, "text": "\n" }, { "id": 50280, "logprob": -0.9707031, "special": false, "text": " " }, { "id": 26209, "logprob": -1.4414062, "special": false, "text": "response" }, { "id": 796, "logprob": -0.056854248, "special": false, "text": " =" }, { "id": 2116, "logprob": -1.1533203, "special": false, "text": " self" } ], "top_tokens": null }, "generated_text": ": {request}\")\n response = self" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 14402, "logprob": null, "text": "Test" }, { "id": 2581, "logprob": -11.6171875, "text": " request" } ], "seed": null, "tokens": [ { "id": 25, "logprob": -2.3203125, "special": false, "text": ":" }, { "id": 1391, "logprob": -0.98779297, "special": false, "text": " {" }, { "id": 25927, "logprob": -0.7729492, "special": false, "text": "request" }, { "id": 92, "logprob": -0.7241211, "special": false, "text": "}" }, { "id": 4943, "logprob": -0.4091797, "special": false, "text": "\")" }, { "id": 198, "logprob": -0.119018555, "special": false, "text": "\n" }, { "id": 50280, "logprob": -0.9707031, "special": false, "text": " " }, { "id": 26209, "logprob": -1.4414062, "special": false, "text": "response" }, { "id": 796, "logprob": -0.056854248, "special": false, "text": " =" }, { "id": 2116, "logprob": -1.1533203, "special": false, "text": " self" } ], "top_tokens": null }, "generated_text": ": {request}\")\n response = self" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 14402, "logprob": null, "text": "Test" }, { "id": 2581, "logprob": -11.6171875, "text": " request" } ], "seed": null, "tokens": [ { "id": 25, "logprob": -2.3203125, "special": false, "text": ":" }, { "id": 1391, "logprob": -0.98779297, "special": false, "text": " {" }, { "id": 25927, 
"logprob": -0.7729492, "special": false, "text": "request" }, { "id": 92, "logprob": -0.7241211, "special": false, "text": "}" }, { "id": 4943, "logprob": -0.4091797, "special": false, "text": "\")" }, { "id": 198, "logprob": -0.119018555, "special": false, "text": "\n" }, { "id": 50280, "logprob": -0.9707031, "special": false, "text": " " }, { "id": 26209, "logprob": -1.4414062, "special": false, "text": "response" }, { "id": 796, "logprob": -0.056854248, "special": false, "text": " =" }, { "id": 2116, "logprob": -1.1533203, "special": false, "text": " self" } ], "top_tokens": null }, "generated_text": ": {request}\")\n response = self" } ]
text-generation-inference/integration-tests/models/__snapshots__/test_flash_phi/test_flash_phi_load.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_phi/test_flash_phi_load.json", "repo_id": "text-generation-inference", "token_count": 4672 }
213
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4911, "logprob": -6.9765625, "text": "User" }, { "id": 29901, "logprob": -0.0059432983, "text": ":" }, { "id": 32000, "logprob": -0.8408203, "text": "<fake_token_around_image>" }, { "id": 32001, "logprob": -9.906292e-05, "text": "<image>" }, { "id": 32000, "logprob": -2.3841858e-07, "text": "<fake_token_around_image>" }, { "id": 1815, "logprob": -4.1679688, "text": "Can" }, { "id": 366, "logprob": -0.014099121, "text": "you" }, { "id": 2649, "logprob": -4.4609375, "text": "tell" }, { "id": 592, "logprob": -0.29882812, "text": "me" }, { "id": 263, "logprob": -4.1445312, "text": "a" }, { "id": 1407, "logprob": -9.3828125, "text": "very" }, { "id": 3273, "logprob": -1.9736328, "text": "short" }, { "id": 5828, "logprob": -0.2800293, "text": "story" }, { "id": 2729, "logprob": -3.5625, "text": "based" }, { "id": 373, "logprob": -0.0006427765, "text": "on" }, { "id": 278, "logprob": -0.13952637, "text": "the" }, { "id": 1967, "logprob": -0.068115234, "text": "image" }, { "id": 29973, "logprob": -0.16357422, "text": "?" } ], "seed": null, "tokens": [ { "id": 32002, "logprob": -0.0026474, "special": true, "text": "<end_of_utterance>" }, { "id": 29871, "logprob": -8.547306e-05, "special": false, "text": " " }, { "id": 13, "logprob": -1.7881393e-05, "special": false, "text": "\n" }, { "id": 7900, "logprob": -3.0994415e-06, "special": false, "text": "Ass" }, { "id": 22137, "logprob": 0.0, "special": false, "text": "istant" }, { "id": 29901, "logprob": -3.2186508e-06, "special": false, "text": ":" }, { "id": 319, "logprob": -0.92529297, "special": false, "text": " A" }, { "id": 696, "logprob": -1.1269531, "special": false, "text": " ro" }, { "id": 15664, "logprob": -0.00029492378, "special": false, "text": "oster" }, { "id": 15028, "logprob": -1.1855469, "special": false, "text": " stands" } ] }, "generated_text": " \nAssistant: A rooster stands" }
text-generation-inference/integration-tests/models/__snapshots__/test_idefics/test_idefics.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_idefics/test_idefics.json", "repo_id": "text-generation-inference", "token_count": 2062 }
214
{ "choices": [ { "finish_reason": "length", "index": 0, "logprobs": null, "message": { "content": "As of today, there is a Update available for the Brooklyn, New York, area. According to the latest forecast, it's warm with high temperatures throughout the day. It's forecasted at 75°F for today and 77°F for tomorrow. However, in autumn, the weather typically changes drastically, becoming cooler and wetter. You can find the current weather forecast for the area through your local weather service. Additionally", "name": null, "role": "assistant", "tool_calls": null }, "usage": null } ], "created": 1708957015, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "1.4.3-native", "usage": { "completion_tokens": 100, "prompt_tokens": 60, "total_tokens": 160 } }
text-generation-inference/integration-tests/models/__snapshots__/test_tools_llama/test_flash_llama_grammar_no_tools.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_tools_llama/test_flash_llama_grammar_no_tools.json", "repo_id": "text-generation-inference", "token_count": 355 }
215
import pytest @pytest.fixture(scope="module") def flash_neox_handle(launcher): with launcher("stabilityai/stablelm-tuned-alpha-3b", num_shard=1) as handle: yield handle @pytest.fixture(scope="module") async def flash_neox(flash_neox_handle): await flash_neox_handle.health(300) return flash_neox_handle.client @pytest.mark.skip @pytest.mark.asyncio async def test_flash_neox(flash_neox, response_snapshot): response = await flash_neox.generate( "<|USER|>What's your mood today?<|ASSISTANT|>", max_new_tokens=10, decoder_input_details=True, ) assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.skip @pytest.mark.asyncio async def test_flash_neox_load(flash_neox, generate_load, response_snapshot): responses = await generate_load( flash_neox, "<|USER|>What's your mood today?<|ASSISTANT|>", max_new_tokens=10, n=4, ) generated_texts = [r.generated_text for r in responses] assert len(generated_texts) == 4 assert all( [text == generated_texts[0] for text in generated_texts] ), generated_texts assert responses == response_snapshot
text-generation-inference/integration-tests/models/test_flash_neox.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_flash_neox.py", "repo_id": "text-generation-inference", "token_count": 498 }
216
import pytest import json from text_generation.types import GrammarType @pytest.fixture(scope="module") def flash_llama_grammar_tools_handle(launcher): with launcher( "TinyLlama/TinyLlama-1.1B-Chat-v1.0", num_shard=2, disable_grammar_support=False ) as handle: yield handle @pytest.fixture(scope="module") async def flash_llama_grammar_tools(flash_llama_grammar_tools_handle): await flash_llama_grammar_tools_handle.health(300) return flash_llama_grammar_tools_handle.client # tools to be used in the following tests tools = [ { "type": "function", "function": { "name": "get_current_weather", "description": "Get the current weather", "parameters": { "type": "object", "properties": { "location": { "type": "string", "description": "The city and state, e.g. San Francisco, CA", }, "format": { "type": "string", "enum": ["celsius", "fahrenheit"], "description": "The temperature unit to use. Infer this from the users location.", }, }, "required": ["location", "format"], }, }, }, { "type": "function", "function": { "name": "get_n_day_weather_forecast", "description": "Get an N-day weather forecast", "parameters": { "type": "object", "properties": { "location": { "type": "string", "description": "The city and state, e.g. San Francisco, CA", }, "format": { "type": "string", "enum": ["celsius", "fahrenheit"], "description": "The temperature unit to use. Infer this from the users location.", }, "num_days": { "type": "integer", "description": "The number of days to forecast", }, }, "required": ["location", "format", "num_days"], }, }, }, ] @pytest.mark.asyncio @pytest.mark.private async def test_flash_llama_grammar_no_tools( flash_llama_grammar_tools, response_snapshot ): response = await flash_llama_grammar_tools.chat( max_tokens=100, seed=1, messages=[ { "role": "system", "content": "Youre a helpful assistant! Answer the users question best you can.", }, { "role": "user", "content": "What is the weather like in Brooklyn, New York?", }, ], ) assert ( response.choices[0].message.content == "As of today, there is a Update available for the Brooklyn, New York, area. According to the latest forecast, it's warm with high temperatures throughout the day. It's forecasted at 75°F for today and 77°F for tomorrow. However, in autumn, the weather typically changes drastically, becoming cooler and wetter. You can find the current weather forecast for the area through your local weather service. Additionally" ) assert response == response_snapshot @pytest.mark.skip @pytest.mark.asyncio @pytest.mark.private async def test_flash_llama_grammar_tools(flash_llama_grammar_tools, response_snapshot): response = await flash_llama_grammar_tools.chat( max_tokens=100, seed=1, tools=tools, presence_penalty=-1.1, messages=[ { "role": "system", "content": "Youre a helpful assistant! 
Answer the users question best you can.", }, { "role": "user", "content": "What is the weather like in Brooklyn, New York?", }, ], ) assert response.choices[0].message.content == None assert response.choices[0].message.tool_calls == { "function": { "description": None, "name": "tools", "parameters": { "format": "celsius", "location": "New York, NY", "num_days": 14, }, }, "id": 0, "type": "function", } assert response == response_snapshot @pytest.mark.skip @pytest.mark.asyncio @pytest.mark.private async def test_flash_llama_grammar_tools_auto( flash_llama_grammar_tools, response_snapshot ): response = await flash_llama_grammar_tools.chat( max_tokens=100, seed=1, tools=tools, tool_choice="auto", presence_penalty=-1.1, messages=[ { "role": "system", "content": "Youre a helpful assistant! Answer the users question best you can.", }, { "role": "user", "content": "What is the weather like in Brooklyn, New York?", }, ], ) assert response.choices[0].message.content == None assert response.choices[0].message.tool_calls == { "function": { "description": None, "name": "tools", "parameters": { "format": "celsius", "location": "New York, NY", "num_days": 14, }, }, "id": 0, "type": "function", } assert response == response_snapshot @pytest.mark.skip @pytest.mark.asyncio @pytest.mark.private async def test_flash_llama_grammar_tools_choice( flash_llama_grammar_tools, response_snapshot ): response = await flash_llama_grammar_tools.chat( max_tokens=100, seed=1, tools=tools, tool_choice="get_current_weather", presence_penalty=-1.1, messages=[ { "role": "system", "content": "Youre a helpful assistant! Answer the users question best you can.", }, { "role": "user", "content": "What is the weather like in Brooklyn, New York?", }, ], ) assert response.choices[0].message.content == None assert response.choices[0].message.tool_calls == { "id": 0, "type": "function", "function": { "description": None, "name": "tools", "parameters": {"format": "celsius", "location": "New York, NY"}, }, } assert response == response_snapshot @pytest.mark.skip @pytest.mark.asyncio @pytest.mark.private async def test_flash_llama_grammar_tools_stream( flash_llama_grammar_tools, response_snapshot ): responses = await flash_llama_grammar_tools.chat( max_tokens=100, seed=1, tools=tools, tool_choice="get_current_weather", presence_penalty=-1.1, messages=[ { "role": "system", "content": "Youre a helpful assistant! Answer the users question best you can.", }, { "role": "user", "content": "What is the weather like in Paris, France?", }, ], stream=True, ) count = 0 async for response in responses: count += 1 assert count == 20 assert response == response_snapshot
text-generation-inference/integration-tests/models/test_tools_llama.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_tools_llama.py", "repo_id": "text-generation-inference", "token_count": 3763 }
217
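For readers who want to drive the same tool-calling path outside of the pytest harness, here is a minimal sketch. It assumes the `handle.client` fixture above is the `text_generation` Python `AsyncClient` and that a text-generation-inference server is already running at the URL shown (both are assumptions, not part of the test file); it reuses the `chat(..., tools=..., tool_choice=...)` call pattern from `test_flash_llama_grammar_tools_choice`.

```python
import asyncio

from text_generation import AsyncClient  # assumption: same client type the test fixture returns

# Single-function schema, trimmed from the `tools` list defined in the test module above.
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_current_weather",
            "description": "Get the current weather",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {"type": "string", "description": "The city and state"},
                    "format": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                },
                "required": ["location", "format"],
            },
        },
    }
]


async def main():
    client = AsyncClient("http://127.0.0.1:3000")  # assumption: local TGI instance
    response = await client.chat(
        max_tokens=100,
        seed=1,
        tools=tools,
        tool_choice="get_current_weather",
        messages=[
            {"role": "user", "content": "What is the weather like in Brooklyn, New York?"},
        ],
    )
    # With a forced tool choice the message content stays empty and the call appears in tool_calls.
    print(response.choices[0].message.tool_calls)


asyncio.run(main())
```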
use std::error::Error; use vergen::EmitBuilder; fn main() -> Result<(), Box<dyn Error>> { // Try to get the git sha from the local git repository if EmitBuilder::builder() .fail_on_error() .git_sha(false) .emit() .is_err() { // Unable to get the git sha if let Ok(sha) = std::env::var("GIT_SHA") { // Set it from an env var println!("cargo:rustc-env=VERGEN_GIT_SHA={sha}"); } } // Set docker label if present if let Ok(label) = std::env::var("DOCKER_LABEL") { // Set it from an env var println!("cargo:rustc-env=DOCKER_LABEL={label}"); } Ok(()) }
text-generation-inference/router/build.rs/0
{ "file_path": "text-generation-inference/router/build.rs", "repo_id": "text-generation-inference", "token_count": 324 }
218
[toolchain] # Released on: 28 December, 2023 # Branched from master on: 10 November, 2023 # https://releases.rs/docs/1.75.0/ channel = "1.75.0" components = ["rustfmt", "clippy"]
text-generation-inference/rust-toolchain.toml/0
{ "file_path": "text-generation-inference/rust-toolchain.toml", "repo_id": "text-generation-inference", "token_count": 69 }
219
// Adapted from turboderp exllama: https://github.com/turboderp/exllama #ifndef _cuda_buffers_cuh #define _cuda_buffers_cuh #include <cuda_runtime.h> #include <cuda_fp16.h> #include <cstdint> #include <cstdio> const int CUDA_MAX_DEVICES = 16; // #ifndef _cuda_buffers_cu // extern __constant__ half2 q4_table[16][256]; // #endif class CudaBuffers { public: int device; half* temp_state; // [max_hidden_rows * intermediate_size] half* temp_dq; // size of largest quant tensor * 8 cudaStream_t alt_stream_1; cudaStream_t alt_stream_2; cudaStream_t alt_stream_3; cudaEvent_t alt_stream_1_done; cudaEvent_t alt_stream_2_done; cudaEvent_t alt_stream_3_done; CudaBuffers ( int _device, half* _temp_state, half* _temp_dq ); ~CudaBuffers(); }; CudaBuffers* get_buffers(const int device_index); void prepare_buffers_cuda ( int _device, half* _temp_state, half* _temp_dq ); void cleanup_buffers_cuda(); #endif
text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_buffers.cuh/0
{ "file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_buffers.cuh", "repo_id": "text-generation-inference", "token_count": 471 }
220
#ifndef _matrix_view_cuh #define _matrix_view_cuh #include <cuda_runtime.h> #include <cuda_fp16.h> #include "quant/qdq_util.cuh" class MatrixView_half { public: const half* data; const int height; const int width; __device__ __forceinline__ MatrixView_half(const half* data, const int height, const int width) : data(data), height(height), width(width) { } __device__ __forceinline__ half item(int row, int column) const { return data[row * width + column]; } __device__ __forceinline__ half2 item_half2(int row, int column) const { return ((half2*)data)[(row * width + column) / 2]; } __device__ __forceinline__ half2 item_half2half2(int row, int column) const { return __half2half2(data[row * width + column]); } __device__ __forceinline__ const half* item_ptr(int row, int column) const { return &data[row * width + column]; } __device__ __forceinline__ void item4(half (&items)[4], int row, int column) const { half2* ptr = (half2*) item_ptr(row, column); half2 i01 = ptr[0]; half2 i23 = ptr[1]; items[0] = __low2half(i01); items[1] = __high2half(i01); items[2] = __low2half(i23); items[3] = __high2half(i23); } __device__ __forceinline__ void item4_f(float (&items)[4], int row, int column) const { half2* ptr = (half2*)item_ptr(row, column); half2 i01 = ptr[0]; half2 i23 = ptr[1]; items[0] = __half2float(__low2half(i01)); items[1] = __half2float(__high2half(i01)); items[2] = __half2float(__low2half(i23)); items[3] = __half2float(__high2half(i23)); } __device__ __forceinline__ void item4_h2(half2 (&items)[4], int row, int column) const { half2* ptr = (half2*)item_ptr(row, column); half2 i01 = ptr[0]; half2 i23 = ptr[1]; items[0] = __half2half2(__low2half(i01)); items[1] = __half2half2(__high2half(i01)); items[2] = __half2half2(__low2half(i23)); items[3] = __half2half2(__high2half(i23)); } }; class MatrixView_half_rw { public: half* data; const int height; const int width; __device__ __forceinline__ MatrixView_half_rw(half* data, const int height, const int width) : data(data), height(height), width(width) { } __device__ __forceinline__ half item(int row, int column) const { return data[row * width + column]; } __device__ __forceinline__ half2 item_half2(int row, int column) const { return ((half2*)data)[(row * width + column) / 2]; } __device__ __forceinline__ half* item_ptr(int row, int column) { return &data[row * width + column]; } __device__ __forceinline__ void set(int row, int column, half value) { data[row * width + column] = value; } __device__ __forceinline__ void set_half2(int row, int column, half2 value) { ((half2*)data)[(row * width + column) / 2] = value; } __device__ __forceinline__ void set4(int row, int column, half v0, half v1, half v2, half v3) { half2 v01 = __halves2half2(v0, v1); half2 v23 = __halves2half2(v2, v3); half2* ptr = (half2*) item_ptr(row, column); ptr[0] = v01; ptr[1] = v23; } }; class MatrixView_q4_row { public: const uint32_t* data; const int height; const int width; __device__ __forceinline__ MatrixView_q4_row(const uint32_t* data, const int height, const int width) : data(data), height(height), width(width) { } __device__ __forceinline__ int item(int row, int column) const { int shift = (column & 0x07) * 4; return (data[row * width / 8 + column / 8] >> shift) & 0x0f; } __device__ __forceinline__ void item2(int (&items)[2], int row, int column) const { int shift = (column & 0x07) * 4; uint32_t d = data[row * width / 8 + column / 8] >> shift; items[0] = d & 0x0f; items[1] = (d >> 4) & 0x0f; } __device__ __forceinline__ void item4(int (&items)[4], int row, int column) 
const { int shift = (column & 0x07) * 4; uint32_t d = data[row * width / 8 + column / 8] >> shift; items[0] = d & 0x0f; items[1] = (d >> 4) & 0x0f; items[2] = (d >> 8) & 0x0f; items[3] = (d >> 12) & 0x0f; } }; #endif
text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/matrix_view.cuh/0
{ "file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/matrix_view.cuh", "repo_id": "text-generation-inference", "token_count": 1862 }
221
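The `MatrixView_q4_row::item*` helpers above read 4-bit weights packed eight-to-a-32-bit-word along each row, using the shift/mask arithmetic `shift = (column & 0x07) * 4`. Below is a small pure-Python sketch of the same indexing; the packing layout mirrors the CUDA code above, while the example values are made up.

```python
def q4_row_item(packed: list[int], row: int, column: int, width: int) -> int:
    """Mirror of MatrixView_q4_row::item: eight 4-bit values per 32-bit word, row-major."""
    shift = (column & 0x07) * 4                    # nibble position inside the word
    word = packed[row * width // 8 + column // 8]  # one word holds columns 8k .. 8k+7
    return (word >> shift) & 0x0F


# Pack eight example 4-bit values (0..15) into one 32-bit word, low nibble first.
values = [3, 7, 1, 15, 0, 9, 4, 2]
word = 0
for i, v in enumerate(values):
    word |= v << (4 * i)

packed = [word]  # a 1x8 quantized "row" stored as a single word
assert [q4_row_item(packed, 0, c, width=8) for c in range(8)] == values
```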
from setuptools import setup from torch.utils.cpp_extension import BuildExtension, CUDAExtension import torch extra_cuda_cflags = ["-lineinfo", "-O3"] if torch.version.hip: extra_cuda_cflags += ["-DHIPBLAS_USE_HIP_HALF"] extra_compile_args = { "nvcc": extra_cuda_cflags, } setup( name="exllamav2_kernels", ext_modules=[ CUDAExtension( name="exllamav2_kernels", sources=[ "exllamav2_kernels/ext.cpp", "exllamav2_kernels/cuda/q_matrix.cu", "exllamav2_kernels/cuda/q_gemm.cu", ], extra_compile_args=extra_compile_args, ) ], cmdclass={"build_ext": BuildExtension}, )
text-generation-inference/server/exllamav2_kernels/setup.py/0
{ "file_path": "text-generation-inference/server/exllamav2_kernels/setup.py", "repo_id": "text-generation-inference", "token_count": 363 }
222
# test_watermark_logits_processor.py import os import numpy as np import torch from text_generation_server.utils.watermark import WatermarkLogitsProcessor GAMMA = os.getenv("WATERMARK_GAMMA", 0.5) DELTA = os.getenv("WATERMARK_DELTA", 2.0) def test_seed_rng(): input_ids = [101, 2036, 3731, 102, 2003, 103] processor = WatermarkLogitsProcessor() processor._seed_rng(input_ids) assert isinstance(processor.rng, torch.Generator) def test_get_greenlist_ids(): input_ids = [101, 2036, 3731, 102, 2003, 103] processor = WatermarkLogitsProcessor() result = processor._get_greenlist_ids(input_ids, 10, torch.device("cpu")) assert max(result) <= 10 assert len(result) == int(10 * 0.5) def test_calc_greenlist_mask(): processor = WatermarkLogitsProcessor() scores = torch.tensor([[0.5, 0.3, 0.2, 0.8], [0.1, 0.2, 0.7, 0.9]]) greenlist_token_ids = torch.tensor([2, 3]) result = processor._calc_greenlist_mask(scores, greenlist_token_ids) assert result.tolist() == [[False, False, False, False], [False, False, True, True]] assert result.shape == scores.shape def test_bias_greenlist_logits(): processor = WatermarkLogitsProcessor() scores = torch.tensor([[0.5, 0.3, 0.2, 0.8], [0.1, 0.2, 0.7, 0.9]]) green_tokens_mask = torch.tensor( [[False, False, True, True], [False, False, False, True]] ) greenlist_bias = 2.0 result = processor._bias_greenlist_logits(scores, green_tokens_mask, greenlist_bias) assert np.allclose(result.tolist(), [[0.5, 0.3, 2.2, 2.8], [0.1, 0.2, 0.7, 2.9]]) assert result.shape == scores.shape def test_call(): input_ids = [101, 2036, 3731, 102, 2003, 103] processor = WatermarkLogitsProcessor() scores = torch.tensor([[0.5, 0.3, 0.2, 0.8], [0.1, 0.2, 0.7, 0.9]]) result = processor(input_ids, scores) assert result.shape == scores.shape
text-generation-inference/server/tests/utils/test_watermark.py/0
{ "file_path": "text-generation-inference/server/tests/utils/test_watermark.py", "repo_id": "text-generation-inference", "token_count": 781 }
223
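Beyond the unit tests above, the processor is meant to be applied to the raw logits of each decoding step. A minimal sketch of that usage, assuming the `text_generation_server` package is importable; the token ids and scores are made-up toy values with the same shapes as in `test_call`.

```python
import torch

from text_generation_server.utils.watermark import WatermarkLogitsProcessor

processor = WatermarkLogitsProcessor()  # gamma/delta fall back to their defaults

# Pretend we are mid-generation: a short prefix and raw scores over a tiny vocabulary.
input_ids = [101, 2036, 3731, 102, 2003, 103]
scores = torch.tensor([[0.5, 0.3, 0.2, 0.8], [0.1, 0.2, 0.7, 0.9]])

# Same call as in test_call above: green-listed tokens get their logits biased upward,
# which is what later makes the generated text statistically detectable.
biased_scores = processor(input_ids, scores)
next_tokens = biased_scores.argmax(dim=-1)
print(next_tokens)
```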
import torch import torch.distributed from torch import nn from transformers.activations import ACT2FN from transformers.configuration_utils import PretrainedConfig from typing import Optional, List, Tuple from text_generation_server.utils import paged_attention, flash_attn from text_generation_server.utils.layers import ( TensorParallelRowLinear, TensorParallelColumnLinear, TensorParallelEmbedding, PositionRotaryEmbedding, SpeculativeHead, get_linear, FastLayerNorm, ) class PhiConfig(PretrainedConfig): def __init__( self, vocab_size=51200, hidden_size=2560, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=32, hidden_act="gelu_fast", # llama uses silu layer_norm_eps=1e-05, # rms in llama, pad_token_id=0, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, rope_theta=10000.0, resid_pdrop=0.1, # llama doesn't have this partial_rotary_factor=0.5, # important difference between llama and phi **kwargs, ): self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps self.rope_theta = rope_theta self.resid_pdrop = resid_pdrop self.partial_rotary_factor = partial_rotary_factor super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, ) # this is the same as llama except for Phi uses bias=True def load_attention(config, prefix, weights): if config.num_attention_heads != config.num_key_value_heads: return _load_gqa(config, prefix, weights) else: return TensorParallelColumnLinear.load_multi( config, prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"], dim=0, weights=weights, bias=True, ) def _load_gqa(config, prefix: str, weights): assert config.hidden_size % config.num_attention_heads == 0 assert config.num_attention_heads % weights.process_group.size() == 0 weight = weights.get_multi_weights_col( prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"], quantize=config.quantize, dim=0, ) if config.quantize not in ["gptq", "awq"]: weight = weight.to(dtype=weights.dtype).to(device=weights.device) head_size = config.hidden_size // config.num_attention_heads num_heads = config.num_attention_heads // weights.process_group.size() num_key_value_heads = config.num_key_value_heads // weights.process_group.size() assert list(weight.shape) == [ (num_heads + 2 * num_key_value_heads) * head_size, config.hidden_size, ], f"{list(weight.shape)} != {[(num_heads + 2 * config.num_key_value_heads) * head_size, config.hidden_size]}" # this is the same as llama except for Phi uses bias=True return TensorParallelColumnLinear( get_linear(weight, bias=True, quantize=config.quantize) ) class FlashPhiAttention(torch.nn.Module): def __init__( self, prefix: str, config, weights, ): super().__init__() self.num_heads = config.num_attention_heads self.hidden_size = config.hidden_size self.head_size = self.hidden_size // self.num_heads self.softmax_scale = self.head_size**-0.5 self.rotary_dim = int(config.partial_rotary_factor * self.head_size) self.rotary_emb = PositionRotaryEmbedding.static( config=config, dim=self.rotary_dim, base=config.rope_theta, device=weights.device, ) if self.num_heads % weights.process_group.size() != 0: raise ValueError( f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} " f"and `num_shards`: {weights.process_group.size()}" ) 
self.num_heads = self.num_heads // weights.process_group.size() self.num_key_value_heads = ( config.num_key_value_heads // weights.process_group.size() ) self.query_key_value = load_attention(config, prefix, weights) # in llama the dense layer is called "o_proj" and has bias=False self.dense = TensorParallelRowLinear.load( config, prefix=f"{prefix}.dense", weights=weights, bias=True, ) self.num_groups = self.num_heads // self.num_key_value_heads self.kv_head_mapping = torch.arange( 0, self.num_key_value_heads, dtype=torch.int32, device=weights.device ).repeat_interleave(self.num_groups) def forward( self, hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, input_lengths, max_s, ): # Compute query, key, value and split qkv = self.query_key_value(hidden_states) query, kv = qkv.split( [ self.head_size * self.num_heads, 2 * self.head_size * self.num_key_value_heads, ], dim=1, ) # Reshape query and key for rotary embeddings query = query.view(-1, self.num_heads, self.head_size) kv = kv.view(-1, 2, self.num_key_value_heads, self.head_size) # NOTE: this is the main difference between Llama and Phi # in llama the rotary embeddings are applied to the whole query and key. # Phi uses PARTIAL rotary embeddings, which are applied to the first 32 dimensions # # Apply partial positional embeddings in place self.rotary_emb( query[:, :, : self.rotary_dim], kv[:, 0, :, : self.rotary_dim], cos, sin ) # Reshape key and value and cache paged_attention.reshape_and_cache( kv[:, 0], kv[:, 1], kv_cache[0], kv_cache[1], slots ) # output tensor attn_output = torch.empty_like(query) # Prefill if cu_seqlen_prefill is not None: flash_attn.attention( query, torch.select(kv, dim=1, index=0), torch.select(kv, dim=1, index=1), attn_output, cu_seqlen_prefill, max_s, self.softmax_scale, ) # Decode else: paged_attention.attention( attn_output, query, kv_cache[0], kv_cache[1], self.kv_head_mapping, self.softmax_scale, block_tables, input_lengths, max_s, ) return self.dense(attn_output.view(-1, self.num_heads * self.head_size)) class PhiMLP(nn.Module): def __init__(self, prefix, config, weights): super().__init__() act = config.hidden_act self.act = ( ACT2FN[act] if "gelu" not in act else lambda x: torch.nn.functional.gelu( x, approximate=( "tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none" ), ) ) # llama weights are up_proj and down_proj and bias=False self.up_proj = TensorParallelRowLinear.load( config, prefix=f"{prefix}.fc1", weights=weights, bias=True, ) self.down_proj = TensorParallelRowLinear.load( config, prefix=f"{prefix}.fc2", weights=weights, bias=True, ) def forward(self, hidden_states): # NOTE: Llama requires the gate up states to an intermediate size # Phi does not and we can avoid the `view` operation return self.down_proj(self.act(self.up_proj(hidden_states))) class FlashPhiLayer(nn.Module): def __init__(self, layer_id, config, weights): super().__init__() prefix = f"model.layers.{layer_id}" self.self_attn = FlashPhiAttention( prefix=f"{prefix}.self_attn", config=config, weights=weights ) self.mlp = PhiMLP(prefix=f"{prefix}.mlp", config=config, weights=weights) self.input_layernorm = FastLayerNorm.load( prefix=f"{prefix}.input_layernorm", weights=weights, eps=config.layer_norm_eps, ) self.resid_dropout = torch.nn.Dropout(config.resid_pdrop) def forward( self, hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, input_lengths, max_s, ): hidden_states, res = self.input_layernorm(hidden_states, residual) # Self Attention attn_output = 
self.self_attn( hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, input_lengths, max_s, ) hidden_states = self.resid_dropout(attn_output).add( self.resid_dropout(self.mlp(hidden_states)) ) return hidden_states, res class FlashPhiModel(torch.nn.Module): def __init__(self, config, weights): super().__init__() process_group = weights.process_group self.tp_rank = process_group.rank() self.tp_world_size = process_group.size() self.embed_tokens = TensorParallelEmbedding( prefix="model.embed_tokens", weights=weights ) self.layers = nn.ModuleList( [ FlashPhiLayer( layer_id, config, weights, ) for layer_id in range(config.num_hidden_layers) ] ) self.gradient_checkpointing = False self.head_size = self.layers[0].self_attn.head_size self.num_heads = self.layers[0].self_attn.num_heads self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads self.norm = FastLayerNorm.load( prefix="model.final_layernorm", weights=weights, eps=config.layer_norm_eps, ) def forward( self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, input_lengths: torch.Tensor, max_s: int, ) -> torch.Tensor: hidden_states = self.embed_tokens(input_ids) # Get rotary cos and sin for this forward # Avoid to index in each layer cos, sin = self.layers[0].self_attn.rotary_emb.get_cos_sin( position_ids, max_s, hidden_states.dtype ) residual = None for i, layer in enumerate(self.layers): hidden_states, residual = layer( hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache[i], block_tables, slots, input_lengths, max_s, ) hidden_states, _ = self.norm(hidden_states, residual) return hidden_states class FlashPhiForCausalLM(torch.nn.Module): def __init__(self, config, weights): super().__init__() self.model = FlashPhiModel(config, weights) self.lm_head = SpeculativeHead.load( config, prefix="lm_head", weights=weights, ) def forward( self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, input_lengths: torch.Tensor, max_s: int, lm_head_indices: Optional[torch.Tensor] = None, ) -> torch.Tensor: hidden_states = self.model( input_ids, position_ids, cu_seqlen_prefill, kv_cache, block_tables, slots, input_lengths, max_s, ) if lm_head_indices is not None: hidden_states = hidden_states[lm_head_indices] return self.lm_head(hidden_states)
text-generation-inference/server/text_generation_server/models/custom_modeling/flash_phi_modeling.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/flash_phi_modeling.py", "repo_id": "text-generation-inference", "token_count": 6564 }
224
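The comments in `FlashPhiAttention.forward` call out the main difference from Llama: rotary position embeddings are applied only to the first `rotary_dim = int(partial_rotary_factor * head_size)` dimensions of each head (with the `PhiConfig` defaults above, head size 2560 / 32 = 80 and factor 0.5, so 40 dimensions), and the remaining dimensions pass through untouched. The sketch below illustrates that idea with a plain rotate-half implementation; it is not the `PositionRotaryEmbedding` kernel used above, and the shapes and values are made up.

```python
import torch


def rotate_half(x: torch.Tensor) -> torch.Tensor:
    x1, x2 = x.chunk(2, dim=-1)
    return torch.cat((-x2, x1), dim=-1)


def apply_partial_rotary(q: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor, rotary_dim: int) -> torch.Tensor:
    # Split the head dimension: only the first `rotary_dim` features are rotated.
    q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
    q_rot = q_rot * cos + rotate_half(q_rot) * sin
    return torch.cat((q_rot, q_pass), dim=-1)


# Example with made-up sizes: head_size=80 and partial_rotary_factor=0.5 -> rotary_dim=40.
seq_len, num_heads, head_size, rotary_dim = 4, 2, 80, 40
q = torch.randn(seq_len, num_heads, head_size)

# cos/sin per position, broadcast over heads; duplicated to cover all rotary_dim features.
inv_freq = 1.0 / (10000.0 ** (torch.arange(0, rotary_dim, 2).float() / rotary_dim))
angles = torch.outer(torch.arange(seq_len).float(), inv_freq)      # (seq_len, rotary_dim // 2)
cos = torch.cat((angles.cos(), angles.cos()), dim=-1)[:, None, :]  # (seq_len, 1, rotary_dim)
sin = torch.cat((angles.sin(), angles.sin()), dim=-1)[:, None, :]

q_out = apply_partial_rotary(q, cos, sin, rotary_dim)
assert torch.allclose(q_out[..., rotary_dim:], q[..., rotary_dim:])  # pass-through half unchanged
```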
# coding=utf-8 # Copyright 2018 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch T5 model.""" import copy import math import warnings from typing import Optional, Tuple, Union from loguru import logger import torch import torch.distributed from torch import nn from torch.nn import CrossEntropyLoss from transformers.activations import ACT2FN from transformers.modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, ) from transformers.modeling_utils import PreTrainedModel from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS from transformers.utils import ( is_torch_fx_proxy, ) from transformers import T5Config from text_generation_server.utils.layers import ( TensorParallelColumnLinear, TensorParallelEmbedding, TensorParallelRowLinear, SpeculativeHead, ) class PartialTPEmbedding(nn.Module): def __init__(self, prefix: str, weights): super().__init__() weight = weights.get_sharded(f"{prefix}.weight", dim=1) self.weight = nn.Parameter(weight) def forward(self, input: torch.Tensor) -> torch.Tensor: return torch.nn.functional.embedding(input, self.weight) @torch.jit.script def layer_norm(hidden_states, weight, epsilon): # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus varience is calculated # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for # half-precision inputs is done in fp32 variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + epsilon) # convert into half-precision if necessary if weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(weight.dtype) return weight * hidden_states class T5LayerNorm(nn.Module): def __init__(self, prefix, weights, eps=1e-6): """ Construct a layernorm module in the T5 style. No bias and no subtraction of mean. """ super().__init__() weight = weights.get_tensor(f"{prefix}.weight") self.weight = nn.Parameter(weight) self.variance_epsilon = torch.tensor(eps) def forward(self, hidden_states): return layer_norm(hidden_states, self.weight, self.variance_epsilon) try: from apex.normalization import FusedRMSNorm T5LayerNorm = FusedRMSNorm # noqa logger.info( "Discovered apex.normalization.FusedRMSNorm - will use it instead of T5LayerNorm" ) except ImportError: # using the normal T5LayerNorm pass except Exception: logger.warning("discovered apex but it failed to load, falling back to T5LayerNorm") pass ALL_LAYERNORM_LAYERS.append(T5LayerNorm) class T5DenseActDense(nn.Module): def __init__(self, config: T5Config, prefix, weights): super().__init__() self.wi = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.wi", weights=weights, bias=False ) ### XXX: T5 models do not handle well both f16 and quantization. ### Overidding specifically this layer for that reason. 
### https://github.com/huggingface/transformers/blob/main/src/transformers/models/t5/modeling_t5.py#L316 ### https://github.com/huggingface/transformers/issues/20287 _q = config.quantize _dtype = weights.dtype weights.dtype = torch.float32 config.quantize = None self.wo_cast = (torch.float32, _dtype) self.wo = TensorParallelRowLinear.load( config, prefix=f"{prefix}.wo", weights=weights, bias=False ) weights.dtype = _dtype config.quantize = _q self.dropout = nn.Dropout(config.dropout_rate) self.act = ( ACT2FN[config.dense_act_fn] if "gelu" not in config.dense_act_fn else lambda x: torch.nn.functional.gelu(x, approximate="tanh") ) def forward(self, hidden_states): hidden_states = self.wi(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states.to(dtype=self.wo_cast[0]) hidden_states = self.wo(hidden_states) # XXX: Recasting is already done within the layer norm. # Casting back to float16 here modifies results # hidden_states = hidden_states.to(dtype=self.wo_cast[1]) return hidden_states class T5DenseGatedActDense(nn.Module): def __init__(self, config: T5Config, prefix, weights): super().__init__() self.wi_0 = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.wi_0", weights=weights, bias=False ) self.wi_1 = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.wi_1", weights=weights, bias=False ) ### XXX: T5 models do not handle well both f16 and quantization. ### Overidding specifically this layer for that reason. ### https://github.com/huggingface/transformers/blob/main/src/transformers/models/t5/modeling_t5.py#L316 ### https://github.com/huggingface/transformers/issues/20287 _q = config.quantize _dtype = weights.dtype weights.dtype = torch.float32 config.quantize = None self.wo_cast = (torch.float32, _dtype) self.wo = TensorParallelRowLinear.load( config, prefix=f"{prefix}.wo", weights=weights, bias=False ) weights.dtype = _dtype config.quantize = _q self.dropout = nn.Dropout(config.dropout_rate) self.act = ( ACT2FN[config.dense_act_fn] if "gelu" not in config.dense_act_fn else lambda x: torch.nn.functional.gelu(x, approximate="tanh") ) def forward(self, hidden_states): hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) hidden_states = hidden_states.to(dtype=self.wo_cast[0]) hidden_states = self.wo(hidden_states) # XXX: Recasting is already done within the layer norm. 
# Casting back to float16 here modifies results # hidden_states = hidden_states.to(dtype=self.wo_cast[1]) return hidden_states class T5LayerFF(nn.Module): def __init__(self, config: T5Config, prefix, weights): super().__init__() if config.is_gated_act: self.DenseReluDense = T5DenseGatedActDense( config, prefix=f"{prefix}.DenseReluDense", weights=weights ) else: self.DenseReluDense = T5DenseActDense( config, prefix=f"{prefix}.DenseReluDense", weights=weights ) self.layer_norm = T5LayerNorm( prefix=f"{prefix}.layer_norm", weights=weights, eps=config.layer_norm_epsilon, ) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states): forwarded_states = self.layer_norm(hidden_states) forwarded_states = self.DenseReluDense(forwarded_states) hidden_states = hidden_states + self.dropout(forwarded_states) return hidden_states class T5Attention(nn.Module): def __init__( self, config: T5Config, prefix, weights, has_relative_attention_bias=False ): super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim process_group = weights.process_group # Mesh TensorFlow initialization to avoid scaling before softmax assert self.n_heads % process_group.size() == 0 self.q = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.q", weights=weights, bias=False ) self.k = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.k", weights=weights, bias=False ) self.v = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.v", weights=weights, bias=False ) self.o = TensorParallelRowLinear.load( config, prefix=f"{prefix}.o", weights=weights, bias=False ) if self.n_heads % weights.process_group.size() != 0: raise ValueError( f"`n_heads` must be divisible by `num_shards` (got `n_heads`: {self.n_heads} " f"and `num_shards`: {weights.process_group.size()}" ) self.n_heads = self.n_heads // process_group.size() self.inner_dim = self.inner_dim // process_group.size() if self.has_relative_attention_bias: self.relative_attention_bias = PartialTPEmbedding( prefix=f"{prefix}.relative_attention_bias", weights=weights ) @staticmethod def _relative_position_bucket( relative_position, bidirectional=True, num_buckets=32, max_distance=128 ): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. 
This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min( relative_position, torch.zeros_like(relative_position) ) # now relative_position is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = relative_position < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance relative_position_if_large = max_exact + ( torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1), ) relative_buckets += torch.where( is_small, relative_position, relative_position_if_large ) return relative_buckets def compute_bias(self, query_length, key_length, device=None): """Compute binned relative position bias""" if device is None: device = self.relative_attention_bias.weight.device context_position = torch.arange(query_length, dtype=torch.long, device=device)[ :, None ] memory_position = torch.arange(key_length, dtype=torch.long, device=device)[ None, : ] relative_position = ( memory_position - context_position ) # shape (query_length, key_length) relative_position_bucket = self._relative_position_bucket( relative_position, # shape (query_length, key_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) values = self.relative_attention_bias( relative_position_bucket ) # shape (query_length, key_length, num_heads) values = values.permute([2, 0, 1]).unsqueeze( 0 ) # shape (1, num_heads, query_length, key_length) return values def forward( self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_value=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, ): """ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states). """ # Input is (batch_size, seq_length, dim) # Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length) # past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head) batch_size, seq_length = hidden_states.shape[:2] real_seq_length = seq_length if past_key_value is not None: assert ( len(past_key_value) == 2 ), f"past_key_value should have 2 past states: keys and values. 
Got {len(past_key_value)} past states" real_seq_length += ( past_key_value[0].shape[2] if query_length is None else query_length ) key_length = ( real_seq_length if key_value_states is None else key_value_states.shape[1] ) def shape(states): """projection""" return states.view( batch_size, -1, self.n_heads, self.key_value_proj_dim ).transpose(1, 2) def unshape(states): """reshape""" return ( states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim) ) def project(hidden_states, proj_layer, key_value_states, past_key_value): """projects hidden states correctly to key/query states""" if key_value_states is None: # self-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(hidden_states)) elif past_key_value is None: # cross-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(key_value_states)) if past_key_value is not None: if key_value_states is None: # self-attn # (batch_size, n_heads, key_length, dim_per_head) hidden_states = torch.cat([past_key_value, hidden_states], dim=2) elif past_key_value.shape[2] != key_value_states.shape[1]: # checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning # cross-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(key_value_states)) else: # cross-attn hidden_states = past_key_value return hidden_states # get query states query_states = shape( self.q(hidden_states) ) # (batch_size, n_heads, seq_length, dim_per_head) # get key/value states key_states = project( hidden_states, self.k, key_value_states, past_key_value[0] if past_key_value is not None else None, ) value_states = project( hidden_states, self.v, key_value_states, past_key_value[1] if past_key_value is not None else None, ) # compute scores scores = torch.matmul( query_states, key_states.transpose(3, 2) ) # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9 if position_bias is None: if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype, ) else: position_bias = self.compute_bias( real_seq_length, key_length, device=scores.device ) # if key and values are already calculated # we want only the last query position bias if past_key_value is not None: position_bias = position_bias[:, :, -hidden_states.size(1) :, :] if mask is not None: position_bias = ( position_bias + mask ) # (batch_size, n_heads, seq_length, key_length) position_bias_masked = position_bias scores += position_bias_masked attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as( scores ) # (batch_size, n_heads, seq_length, key_length) attn_weights = nn.functional.dropout( attn_weights, p=self.dropout, training=self.training ) # (batch_size, n_heads, seq_length, key_length) # Mask heads if we want to if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_output = unshape( torch.matmul(attn_weights, value_states) ) # (batch_size, seq_length, dim) attn_output = self.o(attn_output) present_key_value_state = ( (key_states, value_states) if (self.is_decoder and use_cache) else None ) outputs = (attn_output,) + (present_key_value_state,) + (position_bias,) if output_attentions: outputs = outputs + (attn_weights,) return outputs class T5LayerSelfAttention(nn.Module): def __init__(self, config, prefix, weights, has_relative_attention_bias=False): 
super().__init__() self.SelfAttention = T5Attention( config, prefix=f"{prefix}.SelfAttention", weights=weights, has_relative_attention_bias=has_relative_attention_bias, ) self.layer_norm = T5LayerNorm( prefix=f"{prefix}.layer_norm", weights=weights, eps=config.layer_norm_epsilon, ) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.SelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[ 1: ] # add attentions if we output them return outputs class T5LayerCrossAttention(nn.Module): def __init__(self, config, prefix, weights): super().__init__() self.EncDecAttention = T5Attention( config, prefix=f"{prefix}.EncDecAttention", weights=weights, has_relative_attention_bias=False, ) self.layer_norm = T5LayerNorm( prefix=f"{prefix}.layer_norm", weights=weights, eps=config.layer_norm_epsilon, ) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, query_length=None, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.EncDecAttention( normed_hidden_states, mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, query_length=query_length, output_attentions=output_attentions, ) layer_output = hidden_states + self.dropout(attention_output[0]) outputs = (layer_output,) + attention_output[ 1: ] # add attentions if we output them return outputs class T5Block(nn.Module): def __init__(self, config, prefix, weights, has_relative_attention_bias: bool): super().__init__() self.is_decoder = config.is_decoder self.layer = nn.ModuleList() self.layer.append( T5LayerSelfAttention( config, prefix=f"{prefix}.layer.0", weights=weights, has_relative_attention_bias=has_relative_attention_bias, ) ) if self.is_decoder: i = 2 self.layer.append( T5LayerCrossAttention( config, prefix=f"{prefix}.layer.1", weights=weights ) ) else: i = 1 self.layer.append( T5LayerFF(config, prefix=f"{prefix}.layer.{i}", weights=weights) ) def forward( self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, return_dict=True, ): if past_key_value is not None: if not self.is_decoder: logger.warning( "`past_key_values` is passed to the encoder. Please make sure this is intended." ) expected_num_past_key_values = 2 if encoder_hidden_states is None else 4 if len(past_key_value) != expected_num_past_key_values: raise ValueError( f"There should be {expected_num_past_key_values} past states. " f"{'2 (past / key) for cross attention. 
' if expected_num_past_key_values == 4 else ''}" f"Got {len(past_key_value)} past key / value states" ) self_attn_past_key_value = past_key_value[:2] cross_attn_past_key_value = past_key_value[2:] else: self_attn_past_key_value, cross_attn_past_key_value = None, None self_attention_outputs = self.layer[0]( hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=self_attn_past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states, present_key_value_state = self_attention_outputs[:2] attention_outputs = self_attention_outputs[ 2: ] # Keep self-attention outputs and relative position weights # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16: clamp_value = torch.where( torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max, ) hidden_states = torch.clamp( hidden_states, min=-clamp_value, max=clamp_value ) do_cross_attention = self.is_decoder and encoder_hidden_states is not None if do_cross_attention: # the actual query length is unknown for cross attention # if using past key value states. Need to inject it here if present_key_value_state is not None: query_length = present_key_value_state[0].shape[2] else: query_length = None cross_attention_outputs = self.layer[1]( hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, query_length=query_length, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = cross_attention_outputs[0] # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16: clamp_value = torch.where( torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max, ) hidden_states = torch.clamp( hidden_states, min=-clamp_value, max=clamp_value ) # Combine self attn and cross attn key value states if present_key_value_state is not None: present_key_value_state = ( present_key_value_state + cross_attention_outputs[1] ) # Keep cross-attention outputs and relative position weights attention_outputs = attention_outputs + cross_attention_outputs[2:] # Apply Feed Forward layer hidden_states = self.layer[-1](hidden_states) # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16: clamp_value = torch.where( torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max, ) hidden_states = torch.clamp( hidden_states, min=-clamp_value, max=clamp_value ) outputs = (hidden_states,) if use_cache: outputs = outputs + (present_key_value_state,) + attention_outputs else: outputs = outputs + attention_outputs return outputs # hidden-states, present_key_value_states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) class T5PreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = T5Config def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id assert decoder_start_token_id is not None, ( "self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id." 
" See T5 docs for more information" ) # shift inputs to the right if is_torch_fx_proxy(input_ids): # Item assignment is not supported natively for proxies. shifted_input_ids = torch.full( input_ids.shape[:-1] + (1,), decoder_start_token_id ) shifted_input_ids = torch.cat( [shifted_input_ids, input_ids[..., :-1]], dim=-1 ) else: shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id assert ( pad_token_id is not None ), "self.model.config.pad_token_id has to be defined." # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids class T5Stack(T5PreTrainedModel): def __init__(self, config, prefix, weights, embed_tokens): super().__init__(config) self.is_decoder = config.is_decoder self.embed_tokens = embed_tokens self.block = nn.ModuleList( [ T5Block( config, prefix=f"{prefix}.block.{layer_id}", weights=weights, has_relative_attention_bias=(layer_id == 0), ) for layer_id in range(config.num_layers) ] ) self.final_layer_norm = T5LayerNorm( prefix=f"{prefix}.final_layer_norm", weights=weights, eps=config.layer_norm_epsilon, ) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): # Model parallel use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = ( output_attentions if output_attentions is not None else self.config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) if input_ids is not None and inputs_embeds is not None: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError( f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time" ) elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError( f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds" ) if inputs_embeds is None: assert ( self.embed_tokens is not None ), "You have to initialize the model with valid token embeddings" inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape # required mask seq length can be calculated via length of past mask_seq_length = ( past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length ) if use_cache is True: assert ( self.is_decoder ), f"`use_cache` can only be set to `True` if {self} is used as a decoder" if attention_mask is None: attention_mask = torch.ones( batch_size, mask_seq_length, device=inputs_embeds.device ) if ( self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None ): encoder_seq_length = encoder_hidden_states.shape[1] encoder_attention_mask = torch.ones( batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long, ) # initialize past_key_values with `None` if past does not exist if past_key_values is 
None: past_key_values = [None] * len(self.block) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask = self.get_extended_attention_mask( attention_mask, input_shape ) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.is_decoder and encoder_hidden_states is not None: ( encoder_batch_size, encoder_sequence_length, _, ) = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones( encoder_hidden_shape, device=inputs_embeds.device ) encoder_extended_attention_mask = self.invert_attention_mask( encoder_attention_mask ) else: encoder_extended_attention_mask = None # Prepare head mask if needed head_mask = self.get_head_mask(head_mask, self.config.num_layers) cross_attn_head_mask = self.get_head_mask( cross_attn_head_mask, self.config.num_layers ) present_key_value_states = () if use_cache else None all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if (output_attentions and self.is_decoder) else None position_bias = None encoder_decoder_position_bias = None hidden_states = self.dropout(inputs_embeds) for i, (layer_module, past_key_value) in enumerate( zip(self.block, past_key_values) ): layer_head_mask = head_mask[i] cross_attn_layer_head_mask = cross_attn_head_mask[i] # Model parallel if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, attention_mask=extended_attention_mask, position_bias=position_bias, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) # layer_outputs is a tuple with: # hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) if use_cache is False: layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:] hidden_states, present_key_value_state = layer_outputs[:2] # We share the position biases between the layers - the first layer store them # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights), # (cross-attention position bias), (cross-attention weights) position_bias = layer_outputs[2] if self.is_decoder and encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[ 4 if output_attentions else 3 ] # append next layer key value states if use_cache: present_key_value_states = present_key_value_states + ( present_key_value_state, ) if output_attentions: all_attentions = all_attentions + (layer_outputs[3],) if self.is_decoder: all_cross_attentions = all_cross_attentions + (layer_outputs[5],) hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, present_key_value_states, all_hidden_states, all_attentions, 
all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=present_key_value_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) class T5ForConditionalGeneration(T5PreTrainedModel): def __init__(self, config: T5Config, weights): super().__init__(config) self.model_dim = config.d_model self.shared = TensorParallelEmbedding(prefix="shared", weights=weights) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = T5Stack( config=encoder_config, prefix="encoder", weights=weights, embed_tokens=self.shared, ) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = T5Stack( config=decoder_config, prefix="decoder", weights=weights, embed_tokens=self.shared, ) try: self.lm_head = SpeculativeHead.load( config, prefix="lm_head", weights=weights ) except RuntimeError: # Some models like t5-small were saved with shared weights unlike flan # Since they are declared as the same arch we have no choice but hope # that this is OK instead of using a proper flag. self.lm_head = SpeculativeHead.load( config, prefix="shared", weights=weights ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]: use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask if head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask # Encode if needed (training, first prediction pass) if encoder_outputs is None: # Convert encoder inputs in embeddings if needed encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] if ( labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None ): # get decoder inputs from 
shifting lm labels to the right
            decoder_input_ids = self._shift_right(labels)

        # Decode
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            inputs_embeds=decoder_inputs_embeds,
            past_key_values=past_key_values,
            encoder_hidden_states=hidden_states,
            encoder_attention_mask=attention_mask,
            head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = decoder_outputs[0]

        if self.config.tie_word_embeddings:
            # Rescale output before projecting on vocab
            # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
            sequence_output = sequence_output * (self.model_dim**-0.5)

        logits, speculative_logits = self.lm_head(sequence_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss(ignore_index=-100)
            # move labels to correct device to enable PP
            labels = labels.to(logits.device)
            loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1))
            # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666

        if not return_dict:
            output = (logits,) + decoder_outputs[1:] + encoder_outputs
            return ((loss,) + output) if loss is not None else output

        return (
            Seq2SeqLMOutput(
                loss=loss,
                logits=logits,
                past_key_values=decoder_outputs.past_key_values,
                decoder_hidden_states=decoder_outputs.hidden_states,
                decoder_attentions=decoder_outputs.attentions,
                cross_attentions=decoder_outputs.cross_attentions,
                encoder_last_hidden_state=encoder_outputs.last_hidden_state,
                encoder_hidden_states=encoder_outputs.hidden_states,
                encoder_attentions=encoder_outputs.attentions,
            ),
            speculative_logits,
        )

    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        decoder_attention_mask=None,
        cross_attn_head_mask=None,
        use_cache=None,
        encoder_outputs=None,
        **kwargs,
    ):
        # cut decoder_input_ids if past is used
        if past_key_values is not None:
            input_ids = input_ids[:, -1:]

        return {
            "decoder_input_ids": input_ids,
            "past_key_values": past_key_values,
            "encoder_outputs": encoder_outputs,
            "attention_mask": attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
            "use_cache": use_cache,
        }

    def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
        return self._shift_right(labels)

    def _reorder_cache(self, past_key_values, beam_idx):
        # if decoder past is not included in output
        # speedy decoding is disabled and no need to reorder
        if past_key_values is None:
            logger.warning(
                "You might want to consider setting `use_cache=True` to speed up decoding"
            )
            return past_key_values

        reordered_decoder_past = ()
        for layer_past_states in past_key_values:
            # get the correct batch idx from layer past batch dim
            # batch dim of `past` is at 2nd position
            reordered_layer_past_states = ()
            for layer_past_state in layer_past_states:
                # need to set correct `past` for each of the four key / value states
                reordered_layer_past_states = reordered_layer_past_states + (
                    layer_past_state.index_select(
                        0, beam_idx.to(layer_past_state.device)
                    ),
                )

            assert reordered_layer_past_states[0].shape == layer_past_states[0].shape
            assert len(reordered_layer_past_states) == len(layer_past_states)

            reordered_decoder_past =
reordered_decoder_past + ( reordered_layer_past_states, ) return reordered_decoder_past
text-generation-inference/server/text_generation_server/models/custom_modeling/t5_modeling.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/t5_modeling.py", "repo_id": "text-generation-inference", "token_count": 22496 }
225
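The `layer_norm` helper and `T5LayerNorm` above implement RMS-style normalization: no mean subtraction, no bias, and the variance accumulated in fp32 even for half-precision inputs. Below is a standalone sketch of the same computation; the tensor shapes and epsilon are just example values.

```python
import torch


def t5_rms_norm(hidden_states: torch.Tensor, weight: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    # Accumulate the variance in fp32, as the jit'd layer_norm helper above does.
    variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
    hidden_states = hidden_states * torch.rsqrt(variance + eps)
    # Cast back to half precision only when the scale weight is half precision.
    if weight.dtype in (torch.float16, torch.bfloat16):
        hidden_states = hidden_states.to(weight.dtype)
    return weight * hidden_states


d_model = 8
x = torch.randn(2, 3, d_model, dtype=torch.float16)
w = torch.ones(d_model, dtype=torch.float16)

out = t5_rms_norm(x, w)
# Each position now has (approximately) unit root-mean-square over the hidden dimension.
print(out.float().pow(2).mean(-1).sqrt())
```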
import torch import time from dataclasses import dataclass from opentelemetry import trace from transformers import ( AutoProcessor, AutoTokenizer, PreTrainedTokenizerBase, ProcessorMixin, ) from typing import Optional, Tuple, List, Type, Dict from text_generation_server.models import Model from text_generation_server.models.types import ( Batch, Tokens, Generation, GeneratedText, ) from text_generation_server.pb import generate_pb2 from text_generation_server.utils import NextTokenChooser, StoppingCriteria, Sampling import re IMAGES = re.compile(r"!\[[^\]]*\]\((.*?)\s*(\"(?:.*[^\"])\")?\s*\)") def split(string): parts = [] cursor = 0 for pattern in IMAGES.finditer(string): start = pattern.start() if start != cursor: parts.append(string[cursor:start]) parts.append(pattern.group(1)) cursor = pattern.end() if cursor != len(string): parts.append(string[cursor:]) return parts tracer = trace.get_tracer(__name__) @dataclass class IdeficsCausalLMBatch(Batch): batch_id: int requests: List[generate_pb2.Request] requests_idx_mapping: Dict[int, int] # Decoder values input_ids: torch.Tensor attention_mask: torch.Tensor position_ids: torch.Tensor pixel_values: Optional[torch.Tensor] image_hidden_states: Optional[torch.Tensor] image_attention_mask: Optional[torch.Tensor] past_key_values: Optional[List[Tuple]] # All tokens all_input_ids: List[torch.Tensor] # Lengths of all generations present in the batch input_lengths: List[int] prefix_offsets: List[int] read_offsets: List[int] # Generation helpers next_token_choosers: List[NextTokenChooser] stopping_criterias: List[StoppingCriteria] # Metadata used for padding max_input_length: int padding_right_offset: int # Maximum number of tokens this batch will grow to max_tokens: int # Past metadata keys_head_dim_last: bool = True def to_pb(self) -> generate_pb2.CachedBatch: return generate_pb2.CachedBatch( id=self.batch_id, request_ids=[r.id for r in self.requests], size=len(self), max_tokens=self.max_tokens, ) @classmethod def from_pb( cls, pb: generate_pb2.Batch, tokenizer: PreTrainedTokenizerBase, processor: ProcessorMixin, # Hack dtype: torch.dtype, device: torch.device, ) -> "IdeficsCausalLMBatch": inputs = [] next_token_choosers = [] stopping_criterias = [] prefix_offsets = [] read_offsets = [] requests_idx_mapping = {} # Parse batch max_truncation = 0 padding_right_offset = 0 max_decode_tokens = 0 for i, r in enumerate(pb.requests): requests_idx_mapping[r.id] = i inputs.append(r.inputs) next_token_choosers.append( NextTokenChooser.from_pb(r.parameters, device, tokenizer) ) stopping_criteria = StoppingCriteria.from_pb( r.stopping_parameters, tokenizer ) stopping_criterias.append(stopping_criteria) max_truncation = max(max_truncation, r.truncate) max_decode_tokens += stopping_criteria.max_new_tokens padding_right_offset = max( padding_right_offset, stopping_criteria.max_new_tokens ) prompts = [] for inp in inputs: # Each input is encoded into a list, where each element of this input list is either a string or a URL prompts.append(split(inp)) # The processor replaces the call to tokenizer, and # a/ takes care of fetching images from the URL # b/ generate the correct input_ids, attention_mask, pixel_values, image_attention_mask to feed to the model tokenized_inputs = processor( prompts, return_tensors="pt", padding=True, truncation=True, max_length=max_truncation, add_end_of_utterance_token=False, # Already taken care of inside the prompts, so bypassing the processor's handling of this token ).to(device) for _ in pb.requests: input_len = 
tokenized_inputs["input_ids"].shape[1] prefix_offsets.append( input_len - 5 ) # To decode without potential fallbacks errors read_offsets.append( input_len ) # To decode without potential fallbacks errors input_lengths = tokenized_inputs["attention_mask"].sum(1) max_input_length = input_lengths.max() input_ids = tokenized_inputs["input_ids"] pixel_values = tokenized_inputs["pixel_values"] image_hidden_states = None # Allocate maximum attention_mask attention_mask = input_ids.new_zeros( (pb.size, max_input_length + padding_right_offset) ) # Copy tokenizer attention_mask into fully allocated attention_mask attention_mask[:, :max_input_length] = tokenized_inputs["attention_mask"] # Do the same for image_attention_mask image_attention_mask = input_ids.new_zeros( ( pb.size, max_input_length + padding_right_offset, tokenized_inputs["pixel_values"].size(1), ) ) image_attention_mask[:, :max_input_length, :] = tokenized_inputs[ "image_attention_mask" ] position_ids = tokenized_inputs["attention_mask"].long().cumsum(-1) - 1 position_ids.masked_fill_(tokenized_inputs["attention_mask"] == 0, 1) all_input_ids = tokenized_inputs["input_ids"].T.split( 1, dim=1 ) # It's input_ids but splitted into a tuple of tensors where each tensor is (seq_len, 1) size. It is then transformed into a list max_tokens = len(inputs) * (max_input_length + max_decode_tokens) return cls( batch_id=pb.id, requests=pb.requests, requests_idx_mapping=requests_idx_mapping, input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, pixel_values=pixel_values, image_hidden_states=image_hidden_states, image_attention_mask=image_attention_mask, past_key_values=None, all_input_ids=list(all_input_ids), input_lengths=input_lengths.tolist(), prefix_offsets=prefix_offsets, read_offsets=read_offsets, next_token_choosers=next_token_choosers, stopping_criterias=stopping_criterias, max_input_length=max_input_length.item(), padding_right_offset=padding_right_offset, max_tokens=max_tokens, ) @tracer.start_as_current_span("filter") def filter(self, request_ids: List[int]) -> Optional["IdeficsCausalLMBatch"]: # It deletes requests from the batch. 
For instance when client lost connection if len(request_ids) == 0: raise ValueError("Batch must have at least one request") if len(request_ids) == len(self): return self keep_indices = [] # New values after filtering requests_idx_mapping = {} requests = [] input_lengths = [] prefix_offsets = [] read_offsets = [] all_input_ids = [] max_input_length = 0 next_token_choosers = [] stopping_criterias = [] total_remaining_decode_tokens = 0 new_padding_right_offset = 0 for i, request_id in enumerate(request_ids): idx = self.requests_idx_mapping[request_id] requests_idx_mapping[request_id] = i keep_indices.append(idx) requests.append(self.requests[idx]) prefix_offsets.append(self.prefix_offsets[idx]) read_offsets.append(self.read_offsets[idx]) all_input_ids.append(self.all_input_ids[idx]) request_input_length = self.input_lengths[idx] input_lengths.append(request_input_length) max_input_length = max(max_input_length, request_input_length) next_token_choosers.append(self.next_token_choosers[idx]) stopping_criteria = self.stopping_criterias[idx] stopping_criterias.append(stopping_criteria) remaining_decode_tokens = ( stopping_criteria.max_new_tokens - stopping_criteria.current_tokens ) total_remaining_decode_tokens += remaining_decode_tokens new_padding_right_offset = max( new_padding_right_offset, remaining_decode_tokens ) # Apply indices to input_ids, attention mask, past key values and other items that need to be cached input_ids = self.input_ids[keep_indices] position_ids = self.position_ids[keep_indices] self.attention_mask = self.attention_mask[ keep_indices, -(self.padding_right_offset + max_input_length) : ( self.attention_mask.shape[1] - self.padding_right_offset ) + new_padding_right_offset, ] # Do the same for pixel_values and image_attention_mask pixel_values = self.pixel_values[keep_indices] self.image_attention_mask = self.image_attention_mask[ keep_indices, -(self.padding_right_offset + max_input_length) : ( self.image_attention_mask.shape[1] - self.padding_right_offset ) + new_padding_right_offset, :, ] if self.image_hidden_states is None: image_hidden_states = None else: image_hidden_states = self.image_hidden_states[keep_indices] # Ensure that past_key_values tensors can be updated in-place if type(self.past_key_values[0]) == tuple: self.past_key_values = [list(layer) for layer in self.past_key_values] # Update tensors in-place to allow incremental garbage collection past_kv_length = max_input_length - 1 for layer in self.past_key_values: past_keys, past_values = layer if len(past_keys.shape) == 3: # Force past to be of dim [self_size, num_heads, ...] 
for easy indexing past_keys = past_keys.view(len(self), -1, *past_keys.shape[-2:]) past_values = past_values.view(len(self), -1, *past_values.shape[-2:]) if self.keys_head_dim_last: layer[0] = past_keys[keep_indices, :, -past_kv_length:, :] else: layer[0] = past_keys[keep_indices, :, :, -past_kv_length:] del past_keys layer[1] = past_values[keep_indices, :, -past_kv_length:, :] del past_values max_tokens = len(request_ids) * max_input_length + total_remaining_decode_tokens self.requests = requests self.requests_idx_mapping = requests_idx_mapping self.input_ids = input_ids self.pixel_values = pixel_values self.image_hidden_states = image_hidden_states self.position_ids = position_ids self.all_input_ids = all_input_ids self.input_lengths = input_lengths self.prefix_offsets = prefix_offsets self.read_offsets = read_offsets self.next_token_choosers = next_token_choosers self.stopping_criterias = stopping_criterias self.max_input_length = max_input_length self.padding_right_offset = new_padding_right_offset self.max_tokens = max_tokens return self @classmethod @tracer.start_as_current_span("concatenate") def concatenate( cls, batches: List["IdeficsCausalLMBatch"] ) -> "IdeficsCausalLMBatch": # It adds new requests to the batch # Used for padding total_batch_size = 0 max_input_length = 0 max_num_images = 0 padding_right_offset = 0 for batch in batches: total_batch_size += len(batch) max_input_length = max(max_input_length, batch.max_input_length) max_num_images = max(max_num_images, batch.pixel_values.size(1)) padding_right_offset = max(padding_right_offset, batch.padding_right_offset) # Batch attributes requests = [] requests_idx_mapping = {} input_lengths = [] prefix_offsets = [] read_offsets = [] all_input_ids = [] next_token_choosers = [] stopping_criterias = [] max_tokens = 0 # Batch tensors input_ids = None attention_mask = None position_ids = None pixel_values = None image_hidden_states = None image_attention_mask = None past_key_values = [] # Used for slicing correctly inside the tensors # Equivalent to a cumsum on batch sizes start_index = 0 for i, batch in enumerate(batches): requests.extend(batch.requests) input_lengths.extend(batch.input_lengths) prefix_offsets.extend(batch.prefix_offsets) read_offsets.extend(batch.read_offsets) all_input_ids.extend(batch.all_input_ids) next_token_choosers.extend(batch.next_token_choosers) stopping_criterias.extend(batch.stopping_criterias) if i == 0: requests_idx_mapping = batch.requests_idx_mapping else: # We need to offset the mapping for each batch by the cumulative batch size for k, v in batch.requests_idx_mapping.items(): requests_idx_mapping[k] = v + start_index # Slicing end index for this batch end_index = start_index + len(batch) # We only concatenate batches that did at least one step if batch.past_key_values is None: raise ValueError("only concatenate prefilled batches") # Create empty tensor # input_ids is always of shape [batch_size, 1] # We do not need to pad it if input_ids is None: input_ids = batch.input_ids.new_empty((total_batch_size, 1)) # Copy to correct indices input_ids[start_index:end_index] = batch.input_ids # Create padded tensor if attention_mask is None: attention_mask = batch.attention_mask.new_zeros( (total_batch_size, max_input_length + padding_right_offset), ) curr_batch_max_num_images = batch.pixel_values.size(1) if pixel_values is None: pixel_values = batch.pixel_values.new_zeros( (total_batch_size, max_num_images, 3, 224, 224) ) pixel_values[start_index:end_index, :curr_batch_max_num_images] = ( batch.pixel_values 
) if image_attention_mask is None: image_attention_mask = batch.image_attention_mask.new_zeros( ( total_batch_size, max_input_length + padding_right_offset, max_num_images, ) ) # We need to slice the attention mask to remove padding from previous steps # and to remove unused allocated space left_offset = max_input_length - batch.max_input_length batch_left_offset = ( batch.attention_mask.shape[1] - batch.max_input_length - batch.padding_right_offset ) attention_mask[ start_index:end_index, left_offset:-padding_right_offset, ] = batch.attention_mask[ :, batch_left_offset : -batch.padding_right_offset, ] image_attention_mask[ start_index:end_index, left_offset:-padding_right_offset, :curr_batch_max_num_images, ] = batch.image_attention_mask[ :, batch_left_offset : -batch.padding_right_offset, : ] # Create empty tensor # position_ids is always of shape [batch_size, 1] if position_ids is None: position_ids = batch.position_ids.new_empty((total_batch_size, 1)) position_ids[start_index:end_index] = batch.position_ids # Shenanigans to get dimensions because BLOOM outputs a past with a different shape # BLOOM Keys: [batch_size * num_heads, head_dim, seq_length] # BLOOM Values: [batch_size * num_heads, seq_length, head_dim] # And ensure that we can update tensors in-place if type(batch.past_key_values[0]) == tuple: batch.past_key_values = [ [t.view(len(batch), -1, *t.shape[-2:]) for t in layer] for layer in batch.past_key_values ] elif len(batch.past_key_values[0][0].shape) == 3: for layer in batch.past_key_values: for k, t in enumerate(layer): layer[k] = t.view(len(batch), -1, *t.shape[-2:]) # Add eventual padding tokens that were added while concatenating max_tokens += batch.max_tokens + ( max_input_length - batch.max_input_length ) * len(batch) start_index = end_index first_past_kvs = batches[0].past_key_values _, num_heads, padded_sequence_length, head_dim = first_past_kvs[0][1].shape padded_past_values_shape = ( total_batch_size, num_heads, max_input_length - 1, head_dim, ) if batches[0].keys_head_dim_last: padded_past_keys_shape = padded_past_values_shape else: # seq_length is last for BLOOM padded_past_keys_shape = ( total_batch_size, num_heads, head_dim, max_input_length - 1, ) # Iterate over attention layers # Concatenate past key values layer by layer to allow incremental garbage collection for j in range(len(first_past_kvs)): padded_past_keys = first_past_kvs[j][0].new_zeros(padded_past_keys_shape) start_index = 0 for batch in batches: past_keys = batch.past_key_values[j][0] # Clear reference to the original tensor batch.past_key_values[j][0] = None # Slicing end index for this batch end_index = start_index + len(batch) # We slice the keys to remove the padding from previous batches past_seq_len = batch.max_input_length - 1 if batch.keys_head_dim_last: padded_past_keys[start_index:end_index, :, -past_seq_len:, :] = ( past_keys[:, :, -past_seq_len:, :] ) else: # BLOOM case padded_past_keys[start_index:end_index, :, :, -past_seq_len:] = ( past_keys[:, :, :, -past_seq_len:] ) del past_keys start_index = end_index padded_past_values = first_past_kvs[j][1].new_zeros( padded_past_values_shape ) start_index = 0 for batch in batches: past_values = batch.past_key_values[j][1] # Clear reference to the original tensor batch.past_key_values[j][1] = None # Slicing end index for this batch end_index = start_index + len(batch) # We slice the past values to remove the padding from previous batches past_seq_len = batch.max_input_length - 1 padded_past_values[start_index:end_index, :, -past_seq_len:, :] = 
( past_values[:, :, -past_seq_len:, :] ) del past_values # Update values start_index = end_index past_key_values.append([padded_past_keys, padded_past_values]) return cls( batch_id=batches[0].batch_id, requests=requests, requests_idx_mapping=requests_idx_mapping, input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, pixel_values=pixel_values, image_hidden_states=image_hidden_states, image_attention_mask=image_attention_mask, past_key_values=past_key_values, all_input_ids=all_input_ids, input_lengths=input_lengths, prefix_offsets=prefix_offsets, read_offsets=read_offsets, next_token_choosers=next_token_choosers, stopping_criterias=stopping_criterias, max_input_length=max_input_length, padding_right_offset=padding_right_offset, keys_head_dim_last=batches[0].keys_head_dim_last, max_tokens=max_tokens, ) def __len__(self): return len(self.requests) class IdeficsCausalLM(Model): def __init__( self, model_id: str, revision: Optional[str] = None, quantize: Optional[str] = None, dtype: Optional[torch.dtype] = None, trust_remote_code: bool = False, ): from text_generation_server.models.custom_modeling.idefics_modeling import ( IdeficsForVisionText2Text, ) if torch.cuda.is_available(): device = torch.device("cuda") dtype = torch.bfloat16 if dtype is None else dtype else: if quantize: raise ValueError("quantization is not available on CPU") device = torch.device("cpu") dtype = torch.float32 if dtype is None else dtype tokenizer = AutoTokenizer.from_pretrained( model_id, revision=revision, padding_side="left", truncation_side="left", trust_remote_code=trust_remote_code, ) self.processor = AutoProcessor.from_pretrained( model_id, revision=revision, padding_side="left", truncation_side="left", trust_remote_code=trust_remote_code, ) model = IdeficsForVisionText2Text.from_pretrained( model_id, revision=revision, torch_dtype=dtype, device_map=( "auto" if torch.cuda.is_available() and torch.cuda.device_count() > 1 else None ), load_in_8bit=quantize == "bitsandbytes", trust_remote_code=trust_remote_code, ) if torch.cuda.is_available() and torch.cuda.device_count() == 1: model = model.cuda() if tokenizer.pad_token_id is None: if model.config.pad_token_id is not None: tokenizer.pad_token_id = model.config.pad_token_id elif model.config.eos_token_id is not None: tokenizer.pad_token_id = model.config.eos_token_id elif tokenizer.eos_token_id is not None: tokenizer.pad_token_id = tokenizer.eos_token_id else: tokenizer.add_special_tokens({"pad_token": "<unk>"}) super(IdeficsCausalLM, self).__init__( model=model, tokenizer=tokenizer, requires_padding=True, dtype=dtype, device=device, ) @property def batch_type(self) -> Type[IdeficsCausalLMBatch]: return IdeficsCausalLMBatch def forward( self, input_ids, attention_mask, position_ids, pixel_values, image_hidden_states, image_attention_mask, past_key_values: Optional = None, ) -> Tuple[torch.Tensor, List[Tuple[torch.Tensor, torch.Tensor]]]: # Model Forward kwargs = { "input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, "image_hidden_states": image_hidden_states, "image_attention_mask": image_attention_mask, "past_key_values": past_key_values, "use_cache": True, "return_dict": True, } if self.has_position_ids: kwargs["position_ids"] = position_ids outputs, speculative_logits = self.model.forward(**kwargs) return ( outputs.logits, speculative_logits, outputs.past_key_values, outputs.image_hidden_states, ) @tracer.start_as_current_span("generate_token") def generate_token( self, batch: IdeficsCausalLMBatch ) -> 
Tuple[List[Generation], Optional[IdeficsCausalLMBatch], Tuple[int, int]]: start = time.time_ns() # slice the attention mask to the correct shape attention_mask = batch.attention_mask[:, : -batch.padding_right_offset] if batch.input_ids.size(1) == 1: # THIS is a hack: when calling idefics.generate, the first time, we need the whole image_attention_mask (size bs x max_seq_len x max_num_images), # but the subsequent times, we only need the last attention mask along the `max_seq_len` dimension # this is due to the nature IDEFICS: it's an encoder decoder, and so when decoding, only the currently generated # token need to attend to the encoder hidden states (i.e. the vision encoder) # Also see seq2seq_lm.Seq2SeqLM.generate_token which has roughly the same logic image_attention_mask = batch.image_attention_mask[ :, -(batch.padding_right_offset + 1) ].unsqueeze(1) else: image_attention_mask = batch.image_attention_mask[ :, : -batch.padding_right_offset ] logits, speculative_logits, past, image_hidden_states = self.forward( input_ids=batch.input_ids, attention_mask=attention_mask, position_ids=batch.position_ids, pixel_values=batch.pixel_values, image_hidden_states=batch.image_hidden_states, image_attention_mask=image_attention_mask, past_key_values=batch.past_key_values, ) # Hardcoded remove image tokens logits[:, 32000:32001] = torch.finfo(logits.dtype).min start_decode = time.time_ns() # Results generations: List[Generation] = [] stopped = True # Zipped iterator iterator = zip( batch.requests, batch.input_lengths, batch.prefix_offsets, batch.read_offsets, logits, batch.next_token_choosers, batch.stopping_criterias, batch.all_input_ids, ) # For each member of the batch for i, ( request, input_length, prefix_offset, read_offset, logits, next_token_chooser, stopping_criteria, all_input_ids, ) in enumerate(iterator): # Select next token next_token_id, logprobs = next_token_chooser( all_input_ids.view(1, -1), logits[-1:, :] ) # Append next token to all tokens all_input_ids = torch.cat([all_input_ids, next_token_id]) new_input_length = input_length + 1 # Generated token next_token_logprob = logprobs[-1, next_token_id] next_token_id_squeezed = next_token_id.squeeze() next_token_text, prefix_offset, read_offset = self.decode_token( all_input_ids[:, 0], prefix_offset, read_offset ) # Evaluate stopping criteria stop, reason = stopping_criteria( next_token_id_squeezed, next_token_text, ) if not stop: stopped = False # Shard generations # All generations will be appended in the rust sharded client if i % self.world_size == self.rank: if stop: # Decode generated tokens output_text, _, _ = self.decode_token( all_input_ids[:, 0], prefix_offset=len(all_input_ids) - stopping_criteria.current_tokens - 1, read_offset=len(all_input_ids) - stopping_criteria.current_tokens, skip_special_tokens=True, ) # Get seed if isinstance(next_token_chooser.choice, Sampling): seed = next_token_chooser.choice.seed else: seed = None generated_text = GeneratedText( output_text, stopping_criteria.current_tokens, reason, seed ) else: generated_text = None # Prefill if stopping_criteria.current_tokens == 1 and request.prefill_logprobs: # Remove generated token to only have prefill and add nan for first prompt token prefill_logprobs = [float("nan")] + torch.log_softmax( logits, -1 ).gather(1, all_input_ids[1:]).squeeze(1)[ -new_input_length:-1 ].tolist() prefill_token_ids = all_input_ids[-new_input_length:-1] prefill_texts = self.tokenizer.batch_decode( prefill_token_ids, clean_up_tokenization_spaces=False, skip_special_tokens=False, ) 
prefill_tokens = Tokens( prefill_token_ids, prefill_logprobs, prefill_texts, is_special=[], ) else: prefill_tokens = None top_tokens = None generation = Generation( request.id, prefill_tokens, Tokens( [next_token_id_squeezed], [next_token_logprob], [next_token_text], [next_token_id_squeezed.item() in self.all_special_ids], ), generated_text, top_tokens, ) generations.append(generation) # Update values batch.next_token_choosers[i] = batch.next_token_choosers[i].advance_grammar( next_token_id_squeezed.item() ) batch.input_ids[i, 0] = next_token_id batch.all_input_ids[i] = all_input_ids batch.input_lengths[i] = new_input_length batch.prefix_offsets[i] = prefix_offset batch.read_offsets[i] = read_offset batch.max_input_length = max(batch.max_input_length, new_input_length) # We finished all generations in the batch; there is no next batch if stopped: forward_ns = start_decode - start decode_ns = time.time_ns() - start_decode return generations, None, (forward_ns, decode_ns) # Slice unused values from prefill batch.input_ids = batch.input_ids[:, :1] # Update attention_mask as we added a new token to input_ids batch.attention_mask[:, -batch.padding_right_offset] = 1 batch.image_attention_mask[:, -batch.padding_right_offset, :] = ( batch.image_attention_mask[:, -(batch.padding_right_offset + 1), :] ) # Decrease right offset batch.padding_right_offset -= 1 # Update position_ids batch.position_ids = batch.position_ids[:, -1:] + 1 # Update past key values batch.past_key_values = past batch.image_hidden_states = image_hidden_states forward_ns = start_decode - start decode_ns = time.time_ns() - start_decode return generations, batch, (forward_ns, decode_ns)
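The `IMAGES` regex and `split()` helper at the top of this file turn a raw prompt containing markdown image syntax into an interleaved list of text fragments and image URLs, which the Idefics processor then resolves into `pixel_values`. Below is a minimal standalone sketch: the regex and helper are copied from the file so it runs without the `text_generation_server` package, and the prompt, URL, and expected output are illustrative.

```python
# Standalone sketch of the markdown-image splitting used by IdeficsCausalLMBatch.from_pb.
# The regex and split() mirror the module above; the prompt and URL are made up.
import re

IMAGES = re.compile(r"!\[[^\]]*\]\((.*?)\s*(\"(?:.*[^\"])\")?\s*\)")


def split(string):
    # Interleave raw text fragments with the URLs captured from ![alt](url) patterns
    parts = []
    cursor = 0
    for pattern in IMAGES.finditer(string):
        start = pattern.start()
        if start != cursor:
            parts.append(string[cursor:start])
        parts.append(pattern.group(1))
        cursor = pattern.end()
    if cursor != len(string):
        parts.append(string[cursor:])
    return parts


prompt = "User: What is in this image?![](https://example.com/cat.png)<end_of_utterance>"
print(split(prompt))
# ['User: What is in this image?', 'https://example.com/cat.png', '<end_of_utterance>']
```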
text-generation-inference/server/text_generation_server/models/idefics_causal_lm.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/idefics_causal_lm.py", "repo_id": "text-generation-inference", "token_count": 16378 }
226
# Copied logic from https://github.com/mit-han-lab/llm-awq/blob/f084f40bd996f3cf3a0633c1ad7d9d476c318aaa/awq/quantize/qmodule.py

import math
import torch
import torch.nn as nn
import awq_inference_engine  # with CUDA kernels


# class ScaledActivation(nn.Module):
#     def __init__(self, module, scales):
#         super().__init__()
#         self.act = module
#         self.scales = nn.Parameter(scales.data)
#
#     def forward(self, x):
#         return self.act(x) / self.scales.view(1, 1, -1).to(x.device)


class WQLinear(nn.Module):
    def __init__(self, w_bit, group_size, qweight, qzeros, scales, bias):
        super().__init__()

        if w_bit not in [4]:
            raise NotImplementedError("Only 4-bit are supported for now.")

        self.in_features = qweight.shape[0]
        self.out_features = qweight.shape[1] * 32 // w_bit

        self.w_bit = w_bit
        self.group_size = group_size if group_size != -1 else self.in_features
        # quick sanity check (make sure alignment)
        assert self.in_features % self.group_size == 0
        assert self.out_features % (32 // self.w_bit) == 0

        self.qweight = qweight
        self.qzeros = qzeros
        self.scales = scales
        if bias:
            self.bias = bias
        else:
            self.bias = None

    @torch.no_grad()
    def forward(self, x):
        out_shape = x.shape[:-1] + (self.out_features,)
        out = awq_inference_engine.gemm_forward_cuda(
            x.reshape(-1, x.shape[-1]), self.qweight, self.scales, self.qzeros, 8
        )
        out = out + self.bias if self.bias is not None else out
        return out.reshape(out_shape)
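The forward pass defers to a fused CUDA kernel, but the constructor's shape bookkeeping can be sanity-checked on CPU. The sketch below mirrors `WQLinear.__init__` with made-up Llama-style dimensions and does not import `awq_inference_engine` (which needs a CUDA build); the `qzeros`/`scales` shapes follow the usual AWQ packing convention and are purely illustrative.

```python
# Shape bookkeeping only: a sketch of the packing arithmetic in WQLinear.__init__.
# All tensor sizes are made-up examples.
import torch

w_bit = 4                      # the only supported bit-width above
group_size = 128
in_features, out_features = 4096, 11008
pack_factor = 32 // w_bit      # eight 4-bit values per int32

qweight = torch.zeros((in_features, out_features // pack_factor), dtype=torch.int32)
qzeros = torch.zeros((in_features // group_size, out_features // pack_factor), dtype=torch.int32)
scales = torch.zeros((in_features // group_size, out_features), dtype=torch.float16)

# Same derivation and checks as in WQLinear.__init__
assert qweight.shape[1] * 32 // w_bit == out_features
assert in_features % group_size == 0
assert out_features % pack_factor == 0
```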
text-generation-inference/server/text_generation_server/utils/awq/quantize/qmodule.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/utils/awq/quantize/qmodule.py", "repo_id": "text-generation-inference", "token_count": 770 }
227
SPECULATE = None


def get_speculate() -> int:
    global SPECULATE
    return SPECULATE


def set_speculate(speculate: int):
    global SPECULATE
    SPECULATE = speculate
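A hypothetical usage sketch of this process-wide setting; the real call sites live elsewhere in the server's startup and model-loading code.

```python
# Hypothetical usage of the module above; the actual wiring happens during server setup.
from text_generation_server.utils.speculate import get_speculate, set_speculate

set_speculate(2)            # e.g. speculate two extra tokens per decoding step
assert get_speculate() == 2
```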
text-generation-inference/server/text_generation_server/utils/speculate.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/utils/speculate.py", "repo_id": "text-generation-inference", "token_count": 66 }
228
/* eslint-disable @typescript-eslint/no-explicit-any */

import { bertProcessing, byteLevelProcessing, robertaProcessing, sequenceProcessing, templateProcessing } from '../../'

describe('bertProcessing', () => {
  it('instantiates correctly with only two parameters', () => {
    const processor = bertProcessing(['sep', 1], ['cls', 2])
    expect(processor.constructor.name).toEqual('Processor')
  })

  it('throws if only one argument is provided', () => {
    expect(() => (bertProcessing as any)(['sep', 1])).toThrow('Given napi value is not an array')
  })

  it('throws if arguments are malformed', () => {
    expect(() => (bertProcessing as any)(['sep', '1'], ['cls', '2'])).toThrow(
      'Failed to convert napi value String into rust type `u32`',
    )
    expect(() => (bertProcessing as any)(['sep'], ['cls'])).toThrow('Array length < 2')
  })
})

describe('byteLevelProcessing', () => {
  it('instantiates correctly without any parameter', () => {
    const processor = byteLevelProcessing()
    expect(processor.constructor.name).toEqual('Processor')
  })

  it('accepts `undefined` as first parameter', () => {
    expect(byteLevelProcessing(undefined)).toBeDefined()
  })

  it('accepts `boolean` as first parameter', () => {
    expect(byteLevelProcessing(true)).toBeDefined()
  })
})

describe('robertaProcessing', () => {
  it('instantiates correctly with only two parameters', () => {
    const processor = robertaProcessing(['sep', 1], ['cls', 2])
    expect(processor.constructor.name).toEqual('Processor')
  })

  it('accepts `undefined` as third and fourth parameters', () => {
    expect(robertaProcessing(['sep', 1], ['cls', 2], undefined, undefined)).toBeDefined()
  })

  it('accepts `boolean` as third and fourth parameter', () => {
    expect(robertaProcessing(['sep', 1], ['cls', 2], true, true)).toBeDefined()
  })
})

describe('templateProcessing', () => {
  it('instantiates correctly with only a single template', () => {
    const processor = templateProcessing('$A $A')
    expect(processor.constructor.name).toEqual('Processor')
  })

  it('throws if special tokens are missing', () => {
    expect(() => templateProcessing('[CLS] $A [SEP]')).toThrow('Missing SpecialToken(s) with id(s)')
  })

  it('instantiates correctly with both templates', () => {
    const processor = templateProcessing('[CLS] $A [SEP]', '[CLS] $A [SEP] $B:1 [SEP]:1', [
      ['[CLS]', 1],
      ['[SEP]', 2],
    ])
    expect(processor.constructor.name).toEqual('Processor')
  })
})

describe('sequenceProcessing', () => {
  it('accepts `PostProcessor[]` as first parameter', () => {
    const template = templateProcessing('[CLS] $A [SEP]', '[CLS] $A [SEP] $B:1 [SEP]:1', [
      ['[CLS]', 1],
      ['[SEP]', 2],
    ])
    const bytelevel = byteLevelProcessing(true)
    expect(sequenceProcessing([bytelevel, template])).toBeDefined()
  })
})
tokenizers/bindings/node/lib/bindings/post-processors.test.ts/0
{ "file_path": "tokenizers/bindings/node/lib/bindings/post-processors.test.ts", "repo_id": "tokenizers", "token_count": 1022 }
229
# `tokenizers-linux-arm64-gnu`

This is the **aarch64-unknown-linux-gnu** binary for `tokenizers`
tokenizers/bindings/node/npm/linux-arm64-gnu/README.md/0
{ "file_path": "tokenizers/bindings/node/npm/linux-arm64-gnu/README.md", "repo_id": "tokenizers", "token_count": 35 }
230
use serde::de::Deserializer;
use serde::ser::Serializer;
use serde::{Deserialize, Serialize};
use std::sync::{Arc, RwLock};

pub fn serialize<S, T>(val: &Option<Arc<RwLock<T>>>, s: S) -> Result<S::Ok, S::Error>
where
    S: Serializer,
    T: Serialize,
{
    T::serialize(&*(val.clone().unwrap()).read().unwrap(), s)
}

pub fn deserialize<'de, D, T>(d: D) -> Result<Option<Arc<RwLock<T>>>, D::Error>
where
    D: Deserializer<'de>,
    T: Deserialize<'de>,
{
    Ok(Some(Arc::new(RwLock::new(T::deserialize(d)?))))
}
tokenizers/bindings/node/src/arc_rwlock_serde.rs/0
{ "file_path": "tokenizers/bindings/node/src/arc_rwlock_serde.rs", "repo_id": "tokenizers", "token_count": 220 }
231
# Generated content DO NOT EDIT class AddedToken: """ Represents a token that can be be added to a :class:`~tokenizers.Tokenizer`. It can have special options that defines the way it should behave. Args: content (:obj:`str`): The content of the token single_word (:obj:`bool`, defaults to :obj:`False`): Defines whether this token should only match single words. If :obj:`True`, this token will never match inside of a word. For example the token ``ing`` would match on ``tokenizing`` if this option is :obj:`False`, but not if it is :obj:`True`. The notion of "`inside of a word`" is defined by the word boundaries pattern in regular expressions (ie. the token should start and end with word boundaries). lstrip (:obj:`bool`, defaults to :obj:`False`): Defines whether this token should strip all potential whitespaces on its left side. If :obj:`True`, this token will greedily match any whitespace on its left. For example if we try to match the token ``[MASK]`` with ``lstrip=True``, in the text ``"I saw a [MASK]"``, we would match on ``" [MASK]"``. (Note the space on the left). rstrip (:obj:`bool`, defaults to :obj:`False`): Defines whether this token should strip all potential whitespaces on its right side. If :obj:`True`, this token will greedily match any whitespace on its right. It works just like :obj:`lstrip` but on the right. normalized (:obj:`bool`, defaults to :obj:`True` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`False` with :meth:`~tokenizers.Tokenizer.add_special_tokens`): Defines whether this token should match against the normalized version of the input text. For example, with the added token ``"yesterday"``, and a normalizer in charge of lowercasing the text, the token could be extract from the input ``"I saw a lion Yesterday"``. special (:obj:`bool`, defaults to :obj:`False` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`False` with :meth:`~tokenizers.Tokenizer.add_special_tokens`): Defines whether this token should be skipped when decoding. """ def __init__(self, content, single_word=False, lstrip=False, rstrip=False, normalized=True, special=False): pass @property def content(self): """ Get the content of this :obj:`AddedToken` """ pass @property def lstrip(self): """ Get the value of the :obj:`lstrip` option """ pass @property def normalized(self): """ Get the value of the :obj:`normalized` option """ pass @property def rstrip(self): """ Get the value of the :obj:`rstrip` option """ pass @property def single_word(self): """ Get the value of the :obj:`single_word` option """ pass @property def special(self): """ Get the value of the :obj:`special` option """ pass class Encoding: """ The :class:`~tokenizers.Encoding` represents the output of a :class:`~tokenizers.Tokenizer`. """ @property def attention_mask(self): """ The attention mask This indicates to the LM which tokens should be attended to, and which should not. This is especially important when batching sequences, where we need to applying padding. Returns: :obj:`List[int]`: The attention mask """ pass def char_to_token(self, char_pos, sequence_index=0): """ Get the token that contains the char at the given position in the input sequence. 
Args: char_pos (:obj:`int`): The position of a char in the input string sequence_index (:obj:`int`, defaults to :obj:`0`): The index of the sequence that contains the target char Returns: :obj:`int`: The index of the token that contains this char in the encoded sequence """ pass def char_to_word(self, char_pos, sequence_index=0): """ Get the word that contains the char at the given position in the input sequence. Args: char_pos (:obj:`int`): The position of a char in the input string sequence_index (:obj:`int`, defaults to :obj:`0`): The index of the sequence that contains the target char Returns: :obj:`int`: The index of the word that contains this char in the input sequence """ pass @property def ids(self): """ The generated IDs The IDs are the main input to a Language Model. They are the token indices, the numerical representations that a LM understands. Returns: :obj:`List[int]`: The list of IDs """ pass @staticmethod def merge(encodings, growing_offsets=True): """ Merge the list of encodings into one final :class:`~tokenizers.Encoding` Args: encodings (A :obj:`List` of :class:`~tokenizers.Encoding`): The list of encodings that should be merged in one growing_offsets (:obj:`bool`, defaults to :obj:`True`): Whether the offsets should accumulate while merging Returns: :class:`~tokenizers.Encoding`: The resulting Encoding """ pass @property def n_sequences(self): """ The number of sequences represented Returns: :obj:`int`: The number of sequences in this :class:`~tokenizers.Encoding` """ pass @property def offsets(self): """ The offsets associated to each token These offsets let's you slice the input string, and thus retrieve the original part that led to producing the corresponding token. Returns: A :obj:`List` of :obj:`Tuple[int, int]`: The list of offsets """ pass @property def overflowing(self): """ A :obj:`List` of overflowing :class:`~tokenizers.Encoding` When using truncation, the :class:`~tokenizers.Tokenizer` takes care of splitting the output into as many pieces as required to match the specified maximum length. This field lets you retrieve all the subsequent pieces. When you use pairs of sequences, the overflowing pieces will contain enough variations to cover all the possible combinations, while respecting the provided maximum length. """ pass def pad(self, length, direction="right", pad_id=0, pad_type_id=0, pad_token="[PAD]"): """ Pad the :class:`~tokenizers.Encoding` at the given length Args: length (:obj:`int`): The desired length direction: (:obj:`str`, defaults to :obj:`right`): The expected padding direction. Can be either :obj:`right` or :obj:`left` pad_id (:obj:`int`, defaults to :obj:`0`): The ID corresponding to the padding token pad_type_id (:obj:`int`, defaults to :obj:`0`): The type ID corresponding to the padding token pad_token (:obj:`str`, defaults to `[PAD]`): The pad token to use """ pass @property def sequence_ids(self): """ The generated sequence indices. They represent the index of the input sequence associated to each token. The sequence id can be None if the token is not related to any input sequence, like for example with special tokens. Returns: A :obj:`List` of :obj:`Optional[int]`: A list of optional sequence index. """ pass def set_sequence_id(self, sequence_id): """ Set the given sequence index Set the given sequence index for the whole range of tokens contained in this :class:`~tokenizers.Encoding`. """ pass @property def special_tokens_mask(self): """ The special token mask This indicates which tokens are special tokens, and which are not. 
Returns: :obj:`List[int]`: The special tokens mask """ pass def token_to_chars(self, token_index): """ Get the offsets of the token at the given index. The returned offsets are related to the input sequence that contains the token. In order to determine in which input sequence it belongs, you must call :meth:`~tokenizers.Encoding.token_to_sequence()`. Args: token_index (:obj:`int`): The index of a token in the encoded sequence. Returns: :obj:`Tuple[int, int]`: The token offsets :obj:`(first, last + 1)` """ pass def token_to_sequence(self, token_index): """ Get the index of the sequence represented by the given token. In the general use case, this method returns :obj:`0` for a single sequence or the first sequence of a pair, and :obj:`1` for the second sequence of a pair Args: token_index (:obj:`int`): The index of a token in the encoded sequence. Returns: :obj:`int`: The sequence id of the given token """ pass def token_to_word(self, token_index): """ Get the index of the word that contains the token in one of the input sequences. The returned word index is related to the input sequence that contains the token. In order to determine in which input sequence it belongs, you must call :meth:`~tokenizers.Encoding.token_to_sequence()`. Args: token_index (:obj:`int`): The index of a token in the encoded sequence. Returns: :obj:`int`: The index of the word in the relevant input sequence. """ pass @property def tokens(self): """ The generated tokens They are the string representation of the IDs. Returns: :obj:`List[str]`: The list of tokens """ pass def truncate(self, max_length, stride=0, direction="right"): """ Truncate the :class:`~tokenizers.Encoding` at the given length If this :class:`~tokenizers.Encoding` represents multiple sequences, when truncating this information is lost. It will be considered as representing a single sequence. Args: max_length (:obj:`int`): The desired length stride (:obj:`int`, defaults to :obj:`0`): The length of previous content to be included in each overflowing piece direction (:obj:`str`, defaults to :obj:`right`): Truncate direction """ pass @property def type_ids(self): """ The generated type IDs Generally used for tasks like sequence classification or question answering, these tokens let the LM know which input sequence corresponds to each tokens. Returns: :obj:`List[int]`: The list of type ids """ pass @property def word_ids(self): """ The generated word indices. They represent the index of the word associated to each token. When the input is pre-tokenized, they correspond to the ID of the given input label, otherwise they correspond to the words indices as defined by the :class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used. For special tokens and such (any token that was generated from something that was not part of the input), the output is :obj:`None` Returns: A :obj:`List` of :obj:`Optional[int]`: A list of optional word index. """ pass def word_to_chars(self, word_index, sequence_index=0): """ Get the offsets of the word at the given index in one of the input sequences. Args: word_index (:obj:`int`): The index of a word in one of the input sequences. sequence_index (:obj:`int`, defaults to :obj:`0`): The index of the sequence that contains the target word Returns: :obj:`Tuple[int, int]`: The range of characters (span) :obj:`(first, last + 1)` """ pass def word_to_tokens(self, word_index, sequence_index=0): """ Get the encoded tokens corresponding to the word at the given index in one of the input sequences. 
Args: word_index (:obj:`int`): The index of a word in one of the input sequences. sequence_index (:obj:`int`, defaults to :obj:`0`): The index of the sequence that contains the target word Returns: :obj:`Tuple[int, int]`: The range of tokens: :obj:`(first, last + 1)` """ pass @property def words(self): """ The generated word indices. .. warning:: This is deprecated and will be removed in a future version. Please use :obj:`~tokenizers.Encoding.word_ids` instead. They represent the index of the word associated to each token. When the input is pre-tokenized, they correspond to the ID of the given input label, otherwise they correspond to the words indices as defined by the :class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used. For special tokens and such (any token that was generated from something that was not part of the input), the output is :obj:`None` Returns: A :obj:`List` of :obj:`Optional[int]`: A list of optional word index. """ pass class NormalizedString: """ NormalizedString A NormalizedString takes care of modifying an "original" string, to obtain a "normalized" one. While making all the requested modifications, it keeps track of the alignment information between the two versions of the string. Args: sequence: str: The string sequence used to initialize this NormalizedString """ def append(self, s): """ Append the given sequence to the string """ pass def clear(self): """ Clears the string """ pass def filter(self, func): """ Filter each character of the string using the given func """ pass def for_each(self, func): """ Calls the given function for each character of the string """ pass def lowercase(self): """ Lowercase the string """ pass def lstrip(self): """ Strip the left of the string """ pass def map(self, func): """ Calls the given function for each character of the string Replaces each character of the string using the returned value. Each returned value **must** be a str of length 1 (ie a character). """ pass def nfc(self): """ Runs the NFC normalization """ pass def nfd(self): """ Runs the NFD normalization """ pass def nfkc(self): """ Runs the NFKC normalization """ pass def nfkd(self): """ Runs the NFKD normalization """ pass @property def normalized(self): """ The normalized part of the string """ pass def prepend(self, s): """ Prepend the given sequence to the string """ pass def replace(self, pattern, content): """ Replace the content of the given pattern with the provided content Args: pattern: Pattern: A pattern used to match the string. Usually a string or a Regex content: str: The content to be used as replacement """ pass def rstrip(self): """ Strip the right of the string """ pass def slice(self, range): """ Slice the string using the given range """ pass def split(self, pattern, behavior): """ Split the NormalizedString using the given pattern and the specified behavior Args: pattern: Pattern: A pattern used to split the string. Usually a string or a regex built with `tokenizers.Regex` behavior: SplitDelimiterBehavior: The behavior to use when splitting. Choices: "removed", "isolated", "merged_with_previous", "merged_with_next", "contiguous" Returns: A list of NormalizedString, representing each split """ pass def strip(self): """ Strip both ends of the string """ pass def uppercase(self): """ Uppercase the string """ pass class PreTokenizedString: """ PreTokenizedString Wrapper over a string, that provides a way to normalize, pre-tokenize, tokenize the underlying string, while keeping track of the alignment information (offsets). 
The PreTokenizedString manages what we call `splits`. Each split represents a substring which is a subpart of the original string, with the relevant offsets and tokens. When calling one of the methods used to modify the PreTokenizedString (namely one of `split`, `normalize` or `tokenize), only the `splits` that don't have any associated tokens will get modified. Args: sequence: str: The string sequence used to initialize this PreTokenizedString """ def __init__(self, sequence): pass def get_splits(self, offset_referential="original", offset_type="char"): """ Get the splits currently managed by the PreTokenizedString Args: offset_referential: :obj:`str` Whether the returned splits should have offsets expressed relative to the original string, or the normalized one. choices: "original", "normalized". offset_type: :obj:`str` Whether the returned splits should have offsets expressed in bytes or chars. When slicing an str, we usually want to use chars, which is the default value. Now in some cases it might be interesting to get these offsets expressed in bytes, so it is possible to change this here. choices: "char", "bytes" Returns A list of splits """ pass def normalize(self, func): """ Normalize each split of the `PreTokenizedString` using the given `func` Args: func: Callable[[NormalizedString], None]: The function used to normalize each underlying split. This function does not need to return anything, just calling the methods on the provided NormalizedString allow its modification. """ pass def split(self, func): """ Split the PreTokenizedString using the given `func` Args: func: Callable[[index, NormalizedString], List[NormalizedString]]: The function used to split each underlying split. It is expected to return a list of `NormalizedString`, that represent the new splits. If the given `NormalizedString` does not need any splitting, we can just return it directly. In order for the offsets to be tracked accurately, any returned `NormalizedString` should come from calling either `.split` or `.slice` on the received one. """ pass def to_encoding(self, type_id=0, word_idx=None): """ Return an Encoding generated from this PreTokenizedString Args: type_id: int = 0: The type_id to be used on the generated Encoding. word_idx: Optional[int] = None: An optional word index to be used for each token of this Encoding. If provided, all the word indices in the generated Encoding will use this value, instead of the one automatically tracked during pre-tokenization. Returns: An Encoding """ pass def tokenize(self, func): """ Tokenize each split of the `PreTokenizedString` using the given `func` Args: func: Callable[[str], List[Token]]: The function used to tokenize each underlying split. This function must return a list of Token generated from the input str. """ pass class Regex: """ Instantiate a new Regex with the given pattern """ def __init__(self, pattern): pass class Token: pass class Tokenizer: """ A :obj:`Tokenizer` works as a pipeline. It processes some raw text as input and outputs an :class:`~tokenizers.Encoding`. Args: model (:class:`~tokenizers.models.Model`): The core algorithm that this :obj:`Tokenizer` should be using. """ def __init__(self, model): pass def add_special_tokens(self, tokens): """ Add the given special tokens to the Tokenizer. If these tokens are already part of the vocabulary, it just let the Tokenizer know about them. If they don't exist, the Tokenizer creates them, giving them a new id. 
These special tokens will never be processed by the model (ie won't be split into multiple tokens), and they can be removed from the output when decoding. Args: tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`): The list of special tokens we want to add to the vocabulary. Each token can either be a string or an instance of :class:`~tokenizers.AddedToken` for more customization. Returns: :obj:`int`: The number of tokens that were created in the vocabulary """ pass def add_tokens(self, tokens): """ Add the given tokens to the vocabulary The given tokens are added only if they don't already exist in the vocabulary. Each token then gets a new attributed id. Args: tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`): The list of tokens we want to add to the vocabulary. Each token can be either a string or an instance of :class:`~tokenizers.AddedToken` for more customization. Returns: :obj:`int`: The number of tokens that were created in the vocabulary """ pass def decode(self, ids, skip_special_tokens=True): """ Decode the given list of ids back to a string This is used to decode anything coming back from a Language Model Args: ids (A :obj:`List/Tuple` of :obj:`int`): The list of ids that we want to decode skip_special_tokens (:obj:`bool`, defaults to :obj:`True`): Whether the special tokens should be removed from the decoded string Returns: :obj:`str`: The decoded string """ pass def decode_batch(self, sequences, skip_special_tokens=True): """ Decode a batch of ids back to their corresponding string Args: sequences (:obj:`List` of :obj:`List[int]`): The batch of sequences we want to decode skip_special_tokens (:obj:`bool`, defaults to :obj:`True`): Whether the special tokens should be removed from the decoded strings Returns: :obj:`List[str]`: A list of decoded strings """ pass @property def decoder(self): """ The `optional` :class:`~tokenizers.decoders.Decoder` in use by the Tokenizer """ pass def enable_padding( self, direction="right", pad_id=0, pad_type_id=0, pad_token="[PAD]", length=None, pad_to_multiple_of=None ): """ Enable the padding Args: direction (:obj:`str`, `optional`, defaults to :obj:`right`): The direction in which to pad. Can be either ``right`` or ``left`` pad_to_multiple_of (:obj:`int`, `optional`): If specified, the padding length should always snap to the next multiple of the given value. For example if we were going to pad witha length of 250 but ``pad_to_multiple_of=8`` then we will pad to 256. pad_id (:obj:`int`, defaults to 0): The id to be used when padding pad_type_id (:obj:`int`, defaults to 0): The type id to be used when padding pad_token (:obj:`str`, defaults to :obj:`[PAD]`): The pad token to be used when padding length (:obj:`int`, `optional`): If specified, the length at which to pad. If not specified we pad using the size of the longest sequence in a batch. """ pass def enable_truncation(self, max_length, stride=0, strategy="longest_first", direction="right"): """ Enable truncation Args: max_length (:obj:`int`): The max length at which to truncate stride (:obj:`int`, `optional`): The length of the previous first sequence to be included in the overflowing sequence strategy (:obj:`str`, `optional`, defaults to :obj:`longest_first`): The strategy used to truncation. Can be one of ``longest_first``, ``only_first`` or ``only_second``. 
direction (:obj:`str`, defaults to :obj:`right`): Truncate direction """ pass def encode(self, sequence, pair=None, is_pretokenized=False, add_special_tokens=True): """ Encode the given sequence and pair. This method can process raw text sequences as well as already pre-tokenized sequences. Example: Here are some examples of the inputs that are accepted:: encode("A single sequence")` encode("A sequence", "And its pair")` encode([ "A", "pre", "tokenized", "sequence" ], is_pretokenized=True)` encode( [ "A", "pre", "tokenized", "sequence" ], [ "And", "its", "pair" ], is_pretokenized=True ) Args: sequence (:obj:`~tokenizers.InputSequence`): The main input sequence we want to encode. This sequence can be either raw text or pre-tokenized, according to the ``is_pretokenized`` argument: - If ``is_pretokenized=False``: :class:`~tokenizers.TextInputSequence` - If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedInputSequence` pair (:obj:`~tokenizers.InputSequence`, `optional`): An optional input sequence. The expected format is the same that for ``sequence``. is_pretokenized (:obj:`bool`, defaults to :obj:`False`): Whether the input is already pre-tokenized add_special_tokens (:obj:`bool`, defaults to :obj:`True`): Whether to add the special tokens Returns: :class:`~tokenizers.Encoding`: The encoded result """ pass def encode_batch(self, input, is_pretokenized=False, add_special_tokens=True): """ Encode the given batch of inputs. This method accept both raw text sequences as well as already pre-tokenized sequences. Example: Here are some examples of the inputs that are accepted:: encode_batch([ "A single sequence", ("A tuple with a sequence", "And its pair"), [ "A", "pre", "tokenized", "sequence" ], ([ "A", "pre", "tokenized", "sequence" ], "And its pair") ]) Args: input (A :obj:`List`/:obj:`Tuple` of :obj:`~tokenizers.EncodeInput`): A list of single sequences or pair sequences to encode. Each sequence can be either raw text or pre-tokenized, according to the ``is_pretokenized`` argument: - If ``is_pretokenized=False``: :class:`~tokenizers.TextEncodeInput` - If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedEncodeInput` is_pretokenized (:obj:`bool`, defaults to :obj:`False`): Whether the input is already pre-tokenized add_special_tokens (:obj:`bool`, defaults to :obj:`True`): Whether to add the special tokens Returns: A :obj:`List` of :class:`~tokenizers.Encoding`: The encoded batch """ pass @property def encode_special_tokens(self): """ Modifies the tokenizer in order to use or not the special tokens during encoding. Args: value (:obj:`bool`): Whether to use the special tokens or not """ pass @staticmethod def from_buffer(buffer): """ Instantiate a new :class:`~tokenizers.Tokenizer` from the given buffer. Args: buffer (:obj:`bytes`): A buffer containing a previously serialized :class:`~tokenizers.Tokenizer` Returns: :class:`~tokenizers.Tokenizer`: The new tokenizer """ pass @staticmethod def from_file(path): """ Instantiate a new :class:`~tokenizers.Tokenizer` from the file at the given path. Args: path (:obj:`str`): A path to a local JSON file representing a previously serialized :class:`~tokenizers.Tokenizer` Returns: :class:`~tokenizers.Tokenizer`: The new tokenizer """ pass @staticmethod def from_pretrained(identifier, revision="main", auth_token=None): """ Instantiate a new :class:`~tokenizers.Tokenizer` from an existing file on the Hugging Face Hub. 
Args: identifier (:obj:`str`): The identifier of a Model on the Hugging Face Hub, that contains a tokenizer.json file revision (:obj:`str`, defaults to `main`): A branch or commit id auth_token (:obj:`str`, `optional`, defaults to `None`): An optional auth token used to access private repositories on the Hugging Face Hub Returns: :class:`~tokenizers.Tokenizer`: The new tokenizer """ pass @staticmethod def from_str(json): """ Instantiate a new :class:`~tokenizers.Tokenizer` from the given JSON string. Args: json (:obj:`str`): A valid JSON string representing a previously serialized :class:`~tokenizers.Tokenizer` Returns: :class:`~tokenizers.Tokenizer`: The new tokenizer """ pass def get_added_tokens_decoder(self): """ Get the underlying vocabulary Returns: :obj:`Dict[int, AddedToken]`: The vocabulary """ pass def get_vocab(self, with_added_tokens=True): """ Get the underlying vocabulary Args: with_added_tokens (:obj:`bool`, defaults to :obj:`True`): Whether to include the added tokens Returns: :obj:`Dict[str, int]`: The vocabulary """ pass def get_vocab_size(self, with_added_tokens=True): """ Get the size of the underlying vocabulary Args: with_added_tokens (:obj:`bool`, defaults to :obj:`True`): Whether to include the added tokens Returns: :obj:`int`: The size of the vocabulary """ pass def id_to_token(self, id): """ Convert the given id to its corresponding token if it exists Args: id (:obj:`int`): The id to convert Returns: :obj:`Optional[str]`: An optional token, :obj:`None` if out of vocabulary """ pass @property def model(self): """ The :class:`~tokenizers.models.Model` in use by the Tokenizer """ pass def no_padding(self): """ Disable padding """ pass def no_truncation(self): """ Disable truncation """ pass @property def normalizer(self): """ The `optional` :class:`~tokenizers.normalizers.Normalizer` in use by the Tokenizer """ pass def num_special_tokens_to_add(self, is_pair): """ Return the number of special tokens that would be added for single/pair sentences. :param is_pair: Boolean indicating if the input would be a single sentence or a pair :return: """ pass @property def padding(self): """ Get the current padding parameters `Cannot be set, use` :meth:`~tokenizers.Tokenizer.enable_padding` `instead` Returns: (:obj:`dict`, `optional`): A dict with the current padding parameters if padding is enabled """ pass def post_process(self, encoding, pair=None, add_special_tokens=True): """ Apply all the post-processing steps to the given encodings. The various steps are: 1. Truncate according to the set truncation params (provided with :meth:`~tokenizers.Tokenizer.enable_truncation`) 2. Apply the :class:`~tokenizers.processors.PostProcessor` 3. Pad according to the set padding params (provided with :meth:`~tokenizers.Tokenizer.enable_padding`) Args: encoding (:class:`~tokenizers.Encoding`): The :class:`~tokenizers.Encoding` corresponding to the main sequence. pair (:class:`~tokenizers.Encoding`, `optional`): An optional :class:`~tokenizers.Encoding` corresponding to the pair sequence. 
add_special_tokens (:obj:`bool`): Whether to add the special tokens Returns: :class:`~tokenizers.Encoding`: The final post-processed encoding """ pass @property def post_processor(self): """ The `optional` :class:`~tokenizers.processors.PostProcessor` in use by the Tokenizer """ pass @property def pre_tokenizer(self): """ The `optional` :class:`~tokenizers.pre_tokenizers.PreTokenizer` in use by the Tokenizer """ pass def save(self, path, pretty=True): """ Save the :class:`~tokenizers.Tokenizer` to the file at the given path. Args: path (:obj:`str`): A path to a file in which to save the serialized tokenizer. pretty (:obj:`bool`, defaults to :obj:`True`): Whether the JSON file should be pretty formatted. """ pass def to_str(self, pretty=False): """ Gets a serialized string representing this :class:`~tokenizers.Tokenizer`. Args: pretty (:obj:`bool`, defaults to :obj:`False`): Whether the JSON string should be pretty formatted. Returns: :obj:`str`: A string representing the serialized Tokenizer """ pass def token_to_id(self, token): """ Convert the given token to its corresponding id if it exists Args: token (:obj:`str`): The token to convert Returns: :obj:`Optional[int]`: An optional id, :obj:`None` if out of vocabulary """ pass def train(self, files, trainer=None): """ Train the Tokenizer using the given files. Reads the files line by line, while keeping all the whitespace, even new lines. If you want to train from data store in-memory, you can check :meth:`~tokenizers.Tokenizer.train_from_iterator` Args: files (:obj:`List[str]`): A list of path to the files that we should use for training trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`): An optional trainer that should be used to train our Model """ pass def train_from_iterator(self, iterator, trainer=None, length=None): """ Train the Tokenizer using the provided iterator. You can provide anything that is a Python Iterator * A list of sequences :obj:`List[str]` * A generator that yields :obj:`str` or :obj:`List[str]` * A Numpy array of strings * ... Args: iterator (:obj:`Iterator`): Any iterator over strings or list of strings trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`): An optional trainer that should be used to train our Model length (:obj:`int`, `optional`): The total number of sequences in the iterator. This is used to provide meaningful progress tracking """ pass @property def truncation(self): """ Get the currently set truncation parameters `Cannot set, use` :meth:`~tokenizers.Tokenizer.enable_truncation` `instead` Returns: (:obj:`dict`, `optional`): A dict with the current truncation parameters if truncation is enabled """ pass
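These stubs only describe the API surface; the short sketch below strings together a few of the documented calls (`from_pretrained`, `enable_padding`, `enable_truncation`, `encode_batch`, `decode`). The model id is just an example and fetching it from the Hugging Face Hub needs network access.

```python
# A short end-to-end sketch of the API documented in the stubs above.
# "bert-base-uncased" is only an example; any repo with a tokenizer.json works.
from tokenizers import Tokenizer

tokenizer = Tokenizer.from_pretrained("bert-base-uncased")
tokenizer.enable_padding(pad_token="[PAD]", pad_id=tokenizer.token_to_id("[PAD]"))
tokenizer.enable_truncation(max_length=16)

# Mix of a single sequence and a pair, exactly as described for encode_batch
encodings = tokenizer.encode_batch(["A single sequence", ("A sequence", "And its pair")])
for enc in encodings:
    print(enc.tokens, enc.attention_mask)

print(tokenizer.decode(encodings[0].ids, skip_special_tokens=True))
```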
tokenizers/bindings/python/py_src/tokenizers/__init__.pyi/0
{ "file_path": "tokenizers/bindings/python/py_src/tokenizers/__init__.pyi", "repo_id": "tokenizers", "token_count": 16502 }
232
# Generated content DO NOT EDIT
from .. import processors


PostProcessor = processors.PostProcessor
BertProcessing = processors.BertProcessing
ByteLevel = processors.ByteLevel
RobertaProcessing = processors.RobertaProcessing
Sequence = processors.Sequence
TemplateProcessing = processors.TemplateProcessing
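An illustrative way to wire one of the re-exported classes into a tokenizer; the vocabulary and the special-token ids (1, 2) are placeholders chosen to match the template.

```python
# Illustrative use of the re-exported TemplateProcessing; vocabulary and ids are placeholders.
from tokenizers import Tokenizer
from tokenizers.models import WordLevel
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.processors import TemplateProcessing

vocab = {"[UNK]": 0, "[CLS]": 1, "[SEP]": 2, "hello": 3, "world": 4}
tokenizer = Tokenizer(WordLevel(vocab, unk_token="[UNK]"))
tokenizer.pre_tokenizer = Whitespace()
tokenizer.post_processor = TemplateProcessing(
    single="[CLS] $A [SEP]",
    pair="[CLS] $A [SEP] $B:1 [SEP]:1",
    special_tokens=[("[CLS]", 1), ("[SEP]", 2)],
)

print(tokenizer.encode("hello world").tokens)
# ['[CLS]', 'hello', 'world', '[SEP]']
```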
tokenizers/bindings/python/py_src/tokenizers/processors/__init__.py/0
{ "file_path": "tokenizers/bindings/python/py_src/tokenizers/processors/__init__.py", "repo_id": "tokenizers", "token_count": 74 }
233
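The module above only re-exports the Rust-backed post-processors. As a hedged illustration of the most flexible one, `TemplateProcessing`, here is a sketch that wires a BERT-style template onto a toy word-level tokenizer; the five-word vocabulary and the special-token ids are invented so the example is self-contained.

```python
from tokenizers import Tokenizer
from tokenizers.models import WordLevel
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.processors import TemplateProcessing

# Toy vocabulary, chosen only so the example runs without any files.
vocab = {"[UNK]": 0, "[CLS]": 1, "[SEP]": 2, "hello": 3, "world": 4}
tokenizer = Tokenizer(WordLevel(vocab, unk_token="[UNK]"))
tokenizer.pre_tokenizer = Whitespace()

# BERT-style template: [CLS] A [SEP] for single sequences, and
# [CLS] A [SEP] B [SEP] with type id 1 on the second segment for pairs.
tokenizer.post_processor = TemplateProcessing(
    single="[CLS] $A [SEP]",
    pair="[CLS] $A [SEP] $B:1 [SEP]:1",
    special_tokens=[("[CLS]", 1), ("[SEP]", 2)],
)

single = tokenizer.encode("hello world")
print(single.tokens)   # ['[CLS]', 'hello', 'world', '[SEP]']
pair = tokenizer.encode("hello", "world")
print(pair.tokens)     # ['[CLS]', 'hello', '[SEP]', 'world', '[SEP]']
print(pair.type_ids)   # [0, 0, 0, 1, 1]
```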
#![warn(clippy::all)]
#![allow(clippy::upper_case_acronyms)]

// Many false positives with pyo3: it seems &str and &PyAny get flagged
#![allow(clippy::borrow_deref_ref)]

extern crate tokenizers as tk;

mod decoders;
mod encoding;
mod error;
mod models;
mod normalizers;
mod pre_tokenizers;
mod processors;
mod token;
mod tokenizer;
mod trainers;
mod utils;

use pyo3::prelude::*;
use pyo3::wrap_pymodule;

pub const VERSION: &str = env!("CARGO_PKG_VERSION");

// For users using multiprocessing in python, it is quite easy to fork the process running
// tokenizers, ending up with a deadlock because we internally make use of multithreading. So
// we register a callback to be called in the event of a fork so that we can warn the user.
#[cfg(target_family = "unix")]
static mut REGISTERED_FORK_CALLBACK: bool = false;

#[cfg(target_family = "unix")]
extern "C" fn child_after_fork() {
    use tk::parallelism::*;
    if has_parallelism_been_used() && !is_parallelism_configured() {
        eprintln!(
            "huggingface/tokenizers: The current process just got forked, after parallelism has \
             already been used. Disabling parallelism to avoid deadlocks..."
        );
        eprintln!("To disable this warning, you can either:");
        eprintln!(
            "\t- Avoid using `tokenizers` before the fork if possible\n\
             \t- Explicitly set the environment variable {}=(true | false)",
            ENV_VARIABLE
        );
        set_parallelism(false);
    }
}

/// Tokenizers Module
#[pymodule]
pub fn tokenizers(_py: Python, m: &PyModule) -> PyResult<()> {
    let _ = env_logger::try_init_from_env("TOKENIZERS_LOG");

    // Register the fork callback
    #[cfg(target_family = "unix")]
    unsafe {
        if !REGISTERED_FORK_CALLBACK {
            libc::pthread_atfork(None, None, Some(child_after_fork));
            REGISTERED_FORK_CALLBACK = true;
        }
    }

    m.add_class::<tokenizer::PyTokenizer>()?;
    m.add_class::<tokenizer::PyAddedToken>()?;
    m.add_class::<token::PyToken>()?;
    m.add_class::<encoding::PyEncoding>()?;
    m.add_class::<utils::PyRegex>()?;
    m.add_class::<utils::PyNormalizedString>()?;
    m.add_class::<utils::PyPreTokenizedString>()?;
    m.add_wrapped(wrap_pymodule!(models::models))?;
    m.add_wrapped(wrap_pymodule!(pre_tokenizers::pre_tokenizers))?;
    m.add_wrapped(wrap_pymodule!(decoders::decoders))?;
    m.add_wrapped(wrap_pymodule!(processors::processors))?;
    m.add_wrapped(wrap_pymodule!(normalizers::normalizers))?;
    m.add_wrapped(wrap_pymodule!(trainers::trainers))?;
    m.add("__version__", env!("CARGO_PKG_VERSION"))?;

    Ok(())
}
tokenizers/bindings/python/src/lib.rs/0
{ "file_path": "tokenizers/bindings/python/src/lib.rs", "repo_id": "tokenizers", "token_count": 1086 }
234
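The fork handler registered above only fires when the internal thread pool has already been used and parallelism has not been explicitly configured. As a hedged Python-side sketch of the workaround it suggests (assuming `ENV_VARIABLE` is the usual `TOKENIZERS_PARALLELISM` switch; the corpus and pool size are arbitrary):

```python
import os

# Assumption: setting TOKENIZERS_PARALLELISM explicitly marks parallelism as
# "configured", so the fork callback above stays silent on fork-based platforms.
os.environ["TOKENIZERS_PARALLELISM"] = "false"

import multiprocessing as mp

from tokenizers import Tokenizer
from tokenizers.models import BPE
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.trainers import BpeTrainer

# Tiny illustrative tokenizer shared with the workers (corpus is made up).
tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
tokenizer.pre_tokenizer = Whitespace()
tokenizer.train_from_iterator(
    ["some text", "more text"], trainer=BpeTrainer(special_tokens=["[UNK]"])
)


def encode(text):
    # Workers reuse the module-level tokenizer (inherited on fork, re-built on spawn).
    return tokenizer.encode(text).tokens


if __name__ == "__main__":
    # Using the tokenizer before creating the pool is exactly the situation
    # the warning targets when the process later forks.
    tokenizer.encode("used before the fork")
    with mp.Pool(processes=2) as pool:
        print(pool.map(encode, ["first text", "second text"]))
```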
from tokenizers import ByteLevelBPETokenizer from ..utils import data_dir, multiprocessing_with_parallelism, roberta_files class TestByteLevelBPE: def test_basic_encode(self, roberta_files): tokenizer = ByteLevelBPETokenizer.from_file(roberta_files["vocab"], roberta_files["merges"]) output = tokenizer.encode("The quick brown fox jumps over the lazy dog") assert output.ids == [133, 2119, 6219, 23602, 13855, 81, 5, 22414, 2335] assert output.tokens == [ "The", "Ġquick", "Ġbrown", "Ġfox", "Ġjumps", "Ġover", "Ġthe", "Ġlazy", "Ġdog", ] assert output.offsets == [ (0, 3), (3, 9), (9, 15), (15, 19), (19, 25), (25, 30), (30, 34), (34, 39), (39, 43), ] def test_add_prefix_space(self, roberta_files): tokenizer = ByteLevelBPETokenizer.from_file( roberta_files["vocab"], roberta_files["merges"], add_prefix_space=True ) output = tokenizer.encode("The quick brown fox jumps over the lazy dog") assert output.ids == [20, 2119, 6219, 23602, 13855, 81, 5, 22414, 2335] assert output.tokens == [ "ĠThe", "Ġquick", "Ġbrown", "Ġfox", "Ġjumps", "Ġover", "Ġthe", "Ġlazy", "Ġdog", ] assert output.offsets == [ (0, 3), (3, 9), (9, 15), (15, 19), (19, 25), (25, 30), (30, 34), (34, 39), (39, 43), ] def test_lowerspace(self, roberta_files): tokenizer = ByteLevelBPETokenizer.from_file( roberta_files["vocab"], roberta_files["merges"], add_prefix_space=True, lowercase=True, ) output = tokenizer.encode("The Quick Brown Fox Jumps Over The Lazy Dog") assert output.ids == [5, 2119, 6219, 23602, 13855, 81, 5, 22414, 2335] assert output.tokens == [ "Ġthe", "Ġquick", "Ġbrown", "Ġfox", "Ġjumps", "Ġover", "Ġthe", "Ġlazy", "Ġdog", ] def test_multiprocessing_with_parallelism(self, roberta_files): tokenizer = ByteLevelBPETokenizer.from_file(roberta_files["vocab"], roberta_files["merges"]) multiprocessing_with_parallelism(tokenizer, False) multiprocessing_with_parallelism(tokenizer, True) def test_train_from_iterator(self): text = ["A first sentence", "Another sentence", "And a last one"] tokenizer = ByteLevelBPETokenizer() tokenizer.train_from_iterator(text, show_progress=False) output = tokenizer.encode("A sentence") assert output.tokens == ["A", "Ġsentence"]
tokenizers/bindings/python/tests/implementations/test_byte_level_bpe.py/0
{ "file_path": "tokenizers/bindings/python/tests/implementations/test_byte_level_bpe.py", "repo_id": "tokenizers", "token_count": 1653 }
235
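For readers skimming these tests, here is a hedged, self-contained variant of the last one: it trains a `ByteLevelBPETokenizer` from an in-memory corpus, then inspects the byte-level `Ġ` markers, the offsets, and the decoded round trip. The corpus is invented, and the claim that the implementation wires up a byte-level decoder by default is an assumption based on the behavior exercised here.

```python
from tokenizers import ByteLevelBPETokenizer

# Toy corpus, just large enough to learn a handful of merges.
corpus = ["A first sentence", "Another sentence", "And a last one"]

tokenizer = ByteLevelBPETokenizer()
tokenizer.train_from_iterator(corpus, show_progress=False)

output = tokenizer.encode("A sentence")
print(output.tokens)   # e.g. ['A', 'Ġsentence'] — 'Ġ' encodes the leading space
print(output.offsets)  # character spans back into the original string

# Assumption: the byte-level decoder attached by this implementation turns the
# 'Ġ' markers back into real spaces.
print(tokenizer.decode(output.ids))   # 'A sentence'
```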
# Pre-tokenizers

<tokenizerslangcontent>
<python>
## BertPreTokenizer

[[autodoc]] tokenizers.pre_tokenizers.BertPreTokenizer

## ByteLevel

[[autodoc]] tokenizers.pre_tokenizers.ByteLevel

## CharDelimiterSplit

[[autodoc]] tokenizers.pre_tokenizers.CharDelimiterSplit

## Digits

[[autodoc]] tokenizers.pre_tokenizers.Digits

## Metaspace

[[autodoc]] tokenizers.pre_tokenizers.Metaspace

## PreTokenizer

[[autodoc]] tokenizers.pre_tokenizers.PreTokenizer

## Punctuation

[[autodoc]] tokenizers.pre_tokenizers.Punctuation

## Sequence

[[autodoc]] tokenizers.pre_tokenizers.Sequence

## Split

[[autodoc]] tokenizers.pre_tokenizers.Split

## UnicodeScripts

[[autodoc]] tokenizers.pre_tokenizers.UnicodeScripts

## Whitespace

[[autodoc]] tokenizers.pre_tokenizers.Whitespace

## WhitespaceSplit

[[autodoc]] tokenizers.pre_tokenizers.WhitespaceSplit
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent>
tokenizers/docs/source-doc-builder/api/pre-tokenizers.mdx/0
{ "file_path": "tokenizers/docs/source-doc-builder/api/pre-tokenizers.mdx", "repo_id": "tokenizers", "token_count": 371 }
236
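Since the page above is only an autodoc index, a small, hedged sketch of a few of the listed pre-tokenizers may help: it combines `Whitespace` and `Digits` through `Sequence` and inspects the produced word/offset pairs directly with `pre_tokenize_str`. The sample sentence is arbitrary.

```python
from tokenizers.pre_tokenizers import Digits, Sequence, Whitespace

# Split on whitespace/punctuation first, then isolate every digit on its own.
pre_tokenizer = Sequence([Whitespace(), Digits(individual_digits=True)])

# `pre_tokenize_str` returns (piece, (start, end)) tuples — the same
# word/offset pairs a full Tokenizer would feed to its model.
for piece, span in pre_tokenizer.pre_tokenize_str("Call 911 on 2024-05-01!"):
    print(piece, span)
# Expected pieces: 'Call', '9', '1', '1', 'on', '2', '0', '2', '4', '-', '0', '5', ...
```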
The tokenization pipeline ==================================================================================================== When calling :entity:`Tokenizer.encode` or :entity:`Tokenizer.encode_batch`, the input text(s) go through the following pipeline: - :ref:`normalization` - :ref:`pre-tokenization` - :ref:`model` - :ref:`post-processing` We'll see in details what happens during each of those steps in detail, as well as when you want to :ref:`decode <decoding>` some token ids, and how the 🤗 Tokenizers library allows you to customize each of those steps to your needs. If you're already familiar with those steps and want to learn by seeing some code, jump to :ref:`our BERT from scratch example <example>`. For the examples that require a :entity:`Tokenizer`, we will use the tokenizer we trained in the :doc:`quicktour`, which you can load with: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py :language: python :start-after: START reload_tokenizer :end-before: END reload_tokenizer :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START pipeline_reload_tokenizer :end-before: END pipeline_reload_tokenizer :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts :language: javascript :start-after: START reload_tokenizer :end-before: END reload_tokenizer :dedent: 8 .. _normalization: Normalization ---------------------------------------------------------------------------------------------------- Normalization is, in a nutshell, a set of operations you apply to a raw string to make it less random or "cleaner". Common operations include stripping whitespace, removing accented characters or lowercasing all text. If you're familiar with `Unicode normalization <https://unicode.org/reports/tr15>`__, it is also a very common normalization operation applied in most tokenizers. Each normalization operation is represented in the 🤗 Tokenizers library by a :entity:`Normalizer`, and you can combine several of those by using a :entity:`normalizers.Sequence`. Here is a normalizer applying NFD Unicode normalization and removing accents as an example: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py :language: python :start-after: START setup_normalizer :end-before: END setup_normalizer :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START pipeline_setup_normalizer :end-before: END pipeline_setup_normalizer :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts :language: javascript :start-after: START setup_normalizer :end-before: END setup_normalizer :dedent: 8 You can manually test that normalizer by applying it to any string: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py :language: python :start-after: START test_normalizer :end-before: END test_normalizer :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START pipeline_test_normalizer :end-before: END pipeline_test_normalizer :dedent: 4 .. only:: node .. 
literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts :language: javascript :start-after: START test_normalizer :end-before: END test_normalizer :dedent: 8 When building a :entity:`Tokenizer`, you can customize its normalizer by just changing the corresponding attribute: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py :language: python :start-after: START replace_normalizer :end-before: END replace_normalizer :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START pipeline_replace_normalizer :end-before: END pipeline_replace_normalizer :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts :language: javascript :start-after: START replace_normalizer :end-before: END replace_normalizer :dedent: 8 Of course, if you change the way a tokenizer applies normalization, you should probably retrain it from scratch afterward. .. _pre-tokenization: Pre-Tokenization ---------------------------------------------------------------------------------------------------- Pre-tokenization is the act of splitting a text into smaller objects that give an upper bound to what your tokens will be at the end of training. A good way to think of this is that the pre-tokenizer will split your text into "words" and then, your final tokens will be parts of those words. An easy way to pre-tokenize inputs is to split on spaces and punctuations, which is done by the :entity:`pre_tokenizers.Whitespace` pre-tokenizer: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py :language: python :start-after: START setup_pre_tokenizer :end-before: END setup_pre_tokenizer :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START pipeline_setup_pre_tokenizer :end-before: END pipeline_setup_pre_tokenizer :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts :language: javascript :start-after: START setup_pre_tokenizer :end-before: END setup_pre_tokenizer :dedent: 8 The output is a list of tuples, with each tuple containing one word and its span in the original sentence (which is used to determine the final :obj:`offsets` of our :entity:`Encoding`). Note that splitting on punctuation will split contractions like :obj:`"I'm"` in this example. You can combine together any :entity:`PreTokenizer` together. For instance, here is a pre-tokenizer that will split on space, punctuation and digits, separating numbers in their individual digits: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py :language: python :start-after: START combine_pre_tokenizer :end-before: END combine_pre_tokenizer :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START pipeline_combine_pre_tokenizer :end-before: END pipeline_combine_pre_tokenizer :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts :language: javascript :start-after: START combine_pre_tokenizer :end-before: END combine_pre_tokenizer :dedent: 8 As we saw in the :doc:`quicktour`, you can customize the pre-tokenizer of a :entity:`Tokenizer` by just changing the corresponding attribute: .. only:: python .. 
literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py :language: python :start-after: START replace_pre_tokenizer :end-before: END replace_pre_tokenizer :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START pipeline_replace_pre_tokenizer :end-before: END pipeline_replace_pre_tokenizer :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts :language: javascript :start-after: START replace_pre_tokenizer :end-before: END replace_pre_tokenizer :dedent: 8 Of course, if you change the way the pre-tokenizer, you should probably retrain your tokenizer from scratch afterward. .. _model: The Model ---------------------------------------------------------------------------------------------------- Once the input texts are normalized and pre-tokenized, the :entity:`Tokenizer` applies the model on the pre-tokens. This is the part of the pipeline that needs training on your corpus (or that has been trained if you are using a pretrained tokenizer). The role of the model is to split your "words" into tokens, using the rules it has learned. It's also responsible for mapping those tokens to their corresponding IDs in the vocabulary of the model. This model is passed along when intializing the :entity:`Tokenizer` so you already know how to customize this part. Currently, the 🤗 Tokenizers library supports: - :entity:`models.BPE` - :entity:`models.Unigram` - :entity:`models.WordLevel` - :entity:`models.WordPiece` For more details about each model and its behavior, you can check `here <components#models>`__ .. _post-processing: Post-Processing ---------------------------------------------------------------------------------------------------- Post-processing is the last step of the tokenization pipeline, to perform any additional transformation to the :entity:`Encoding` before it's returned, like adding potential special tokens. As we saw in the quick tour, we can customize the post processor of a :entity:`Tokenizer` by setting the corresponding attribute. For instance, here is how we can post-process to make the inputs suitable for the BERT model: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py :language: python :start-after: START setup_processor :end-before: END setup_processor :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START pipeline_setup_processor :end-before: END pipeline_setup_processor :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts :language: javascript :start-after: START setup_processor :end-before: END setup_processor :dedent: 8 Note that contrarily to the pre-tokenizer or the normalizer, you don't need to retrain a tokenizer after changing its post-processor. .. _example: All together: a BERT tokenizer from scratch ---------------------------------------------------------------------------------------------------- Let's put all those pieces together to build a BERT tokenizer. First, BERT relies on WordPiece, so we instantiate a new :entity:`Tokenizer` with this model: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py :language: python :start-after: START bert_setup_tokenizer :end-before: END bert_setup_tokenizer :dedent: 8 .. only:: rust .. 
literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START bert_setup_tokenizer :end-before: END bert_setup_tokenizer :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts :language: javascript :start-after: START bert_setup_tokenizer :end-before: END bert_setup_tokenizer :dedent: 8 Then we know that BERT preprocesses texts by removing accents and lowercasing. We also use a unicode normalizer: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py :language: python :start-after: START bert_setup_normalizer :end-before: END bert_setup_normalizer :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START bert_setup_normalizer :end-before: END bert_setup_normalizer :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts :language: javascript :start-after: START bert_setup_normalizer :end-before: END bert_setup_normalizer :dedent: 8 The pre-tokenizer is just splitting on whitespace and punctuation: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py :language: python :start-after: START bert_setup_pre_tokenizer :end-before: END bert_setup_pre_tokenizer :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START bert_setup_pre_tokenizer :end-before: END bert_setup_pre_tokenizer :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts :language: javascript :start-after: START bert_setup_pre_tokenizer :end-before: END bert_setup_pre_tokenizer :dedent: 8 And the post-processing uses the template we saw in the previous section: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py :language: python :start-after: START bert_setup_processor :end-before: END bert_setup_processor :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START bert_setup_processor :end-before: END bert_setup_processor :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts :language: javascript :start-after: START bert_setup_processor :end-before: END bert_setup_processor :dedent: 8 We can use this tokenizer and train on it on wikitext like in the :doc:`quicktour`: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py :language: python :start-after: START bert_train_tokenizer :end-before: END bert_train_tokenizer :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START bert_train_tokenizer :end-before: END bert_train_tokenizer :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts :language: javascript :start-after: START bert_train_tokenizer :end-before: END bert_train_tokenizer :dedent: 8 .. _decoding: Decoding ---------------------------------------------------------------------------------------------------- .. entities:: python bert_tokenizer :obj:`bert_tokenizer` .. entities:: rust bert_tokenizer :obj:`bert_tokenizer` .. entities:: node bert_tokenizer :obj:`bertTokenizer` On top of encoding the input texts, a :entity:`Tokenizer` also has an API for decoding, that is converting IDs generated by your model back to a text. 
This is done by the methods :entity:`Tokenizer.decode` (for one predicted text) and :entity:`Tokenizer.decode_batch` (for a batch of predictions). The `decoder` will first convert the IDs back to tokens (using the tokenizer's vocabulary) and remove all special tokens, then join those tokens with spaces: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py :language: python :start-after: START test_decoding :end-before: END test_decoding :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START pipeline_test_decoding :end-before: END pipeline_test_decoding :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts :language: javascript :start-after: START test_decoding :end-before: END test_decoding :dedent: 8 If you used a model that added special characters to represent subtokens of a given "word" (like the :obj:`"##"` in WordPiece) you will need to customize the `decoder` to treat them properly. If we take our previous :entity:`bert_tokenizer` for instance the default decoding will give: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py :language: python :start-after: START bert_test_decoding :end-before: END bert_test_decoding :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START bert_test_decoding :end-before: END bert_test_decoding :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts :language: javascript :start-after: START bert_test_decoding :end-before: END bert_test_decoding :dedent: 8 But by changing it to a proper decoder, we get: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py :language: python :start-after: START bert_proper_decoding :end-before: END bert_proper_decoding :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START bert_proper_decoding :end-before: END bert_proper_decoding :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts :language: javascript :start-after: START bert_proper_decoding :end-before: END bert_proper_decoding :dedent: 8
tokenizers/docs/source/pipeline.rst/0
{ "file_path": "tokenizers/docs/source/pipeline.rst", "repo_id": "tokenizers", "token_count": 6323 }
237
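The guide above pulls its code from the binding test suites via `literalinclude`, so the snippets themselves are not visible in this dump. As a rough, hedged reconstruction of the Python path it describes (normalizer → pre-tokenizer → WordPiece model → BERT post-processor → WordPiece decoder), here is a sketch that trains on a tiny in-memory corpus instead of wikitext; the corpus, vocabulary size, and resulting ids are illustrative only.

```python
from tokenizers import Tokenizer, decoders, normalizers, pre_tokenizers, processors, trainers
from tokenizers.models import WordPiece

# Model: WordPiece, as BERT uses.
bert_tokenizer = Tokenizer(WordPiece(unk_token="[UNK]"))

# Normalization: NFD unicode normalization, then strip accents and lowercase.
bert_tokenizer.normalizer = normalizers.Sequence(
    [normalizers.NFD(), normalizers.Lowercase(), normalizers.StripAccents()]
)
print(bert_tokenizer.normalizer.normalize_str("Héllò hôw are ü?"))  # "hello how are u?"

# Pre-tokenization: split on whitespace and punctuation.
bert_tokenizer.pre_tokenizer = pre_tokenizers.Whitespace()

# A toy corpus stands in for the wikitext training described in the guide.
trainer = trainers.WordPieceTrainer(
    vocab_size=100, special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"]
)
bert_tokenizer.train_from_iterator(
    ["Welcome to the tokenizers library.", "This is a tiny stand-in corpus."],
    trainer=trainer,
)

# Post-processing: the BERT template, using the ids learned above.
cls_id = bert_tokenizer.token_to_id("[CLS]")
sep_id = bert_tokenizer.token_to_id("[SEP]")
bert_tokenizer.post_processor = processors.TemplateProcessing(
    single="[CLS] $A [SEP]",
    pair="[CLS] $A [SEP] $B:1 [SEP]:1",
    special_tokens=[("[CLS]", cls_id), ("[SEP]", sep_id)],
)

# Decoding: the WordPiece decoder merges "##" continuation pieces back into words.
bert_tokenizer.decoder = decoders.WordPiece()

encoding = bert_tokenizer.encode("Welcome to the tokenizers library.")
print(encoding.tokens)                      # includes [CLS] ... [SEP]
print(bert_tokenizer.decode(encoding.ids))  # text without the special tokens
```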
[package]
name = "unstable_wasm"
version = "0.1.0"
authors = ["Nicolas Patry"]
edition = "2018"

[lib]
crate-type = ["cdylib", "rlib"]

[features]
default = ["console_error_panic_hook"]

[dependencies]
wasm-bindgen = "0.2.63"

# The `console_error_panic_hook` crate provides better debugging of panics by
# logging them with `console.error`. This is great for development, but requires
# all the `std::fmt` and `std::panicking` infrastructure, so isn't great for
# code size when deploying.
console_error_panic_hook = { version = "0.1.6", optional = true }

# `wee_alloc` is a tiny allocator for wasm that is only ~1K in code size
# compared to the default allocator's ~10K. It is slower than the default
# allocator, however.
#
# Unfortunately, `wee_alloc` requires nightly Rust when targeting wasm for now.
wee_alloc = { version = "0.4.5", optional = true }

tokenizers = { path = "../../", default-features=false, features = ["unstable_wasm"]}

[dev-dependencies]
wasm-bindgen-test = "0.3.13"

[profile.release]
# Tell `rustc` to optimize for small code size.
opt-level = "s"
tokenizers/tokenizers/examples/unstable_wasm/Cargo.toml/0
{ "file_path": "tokenizers/tokenizers/examples/unstable_wasm/Cargo.toml", "repo_id": "tokenizers", "token_count": 364 }
238