import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
normalized_image_embeds = nn.functional.normalize(image_embeds)
normalized_text_embeds = nn.functional.normalize(text_embeds)
return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
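# NOTE: despite its name, this returns cosine *similarity* in [-1, 1]: an
# (image_batch, num_concepts) matrix of dot products between L2-normalized
# embeddings, where higher values mean the image is closer to the concept.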
class StableDiffusionSafetyChecker(PreTrainedModel):
config_class = CLIPConfig
_no_split_modules = ["CLIPEncoderLayer"]
def __init__(self, config: CLIPConfig):
super().__init__(config)
self.vision_model = CLIPVisionModel(config.vision_config)
self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)
self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)
self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
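# the 17 concept embeddings, 3 "special care" embeddings, and their per-concept
# thresholds are frozen (requires_grad=False) and filled from the pretrained checkpoint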
@torch.no_grad()
def forward(self, clip_input, images):
pooled_output = self.vision_model(clip_input)[1] # pooled_output
image_embeds = self.visual_projection(pooled_output)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()
result = []
batch_size = image_embeds.shape[0]
for i in range(batch_size):
result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
adjustment = 0.0
for concept_idx in range(len(special_cos_dist[0])):
concept_cos = special_cos_dist[i][concept_idx]
concept_threshold = self.special_care_embeds_weights[concept_idx].item()
result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
adjustment = 0.01
for concept_idx in range(len(cos_dist[0])):
concept_cos = cos_dist[i][concept_idx]
concept_threshold = self.concept_embeds_weights[concept_idx].item()
result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(concept_idx)
result.append(result_img)
has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]
for idx, has_nsfw_concept in enumerate(has_nsfw_concepts):
if has_nsfw_concept:
images[idx] = np.zeros(images[idx].shape) # black image
if any(has_nsfw_concepts):
logger.warning(
"Potential NSFW content was detected in one or more images. A black image will be returned instead."
" Try again with a different prompt and/or seed."
)
return images, has_nsfw_concepts
@torch.no_grad()
def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
pooled_output = self.vision_model(clip_input)[1] # pooled_output
image_embeds = self.visual_projection(pooled_output)
special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
cos_dist = cosine_distance(image_embeds, self.concept_embeds)
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
adjustment = 0.0
special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
special_care = torch.any(special_scores > 0, dim=1)
special_adjustment = special_care * 0.01
special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])
concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)
images[has_nsfw_concepts] = 0.0 # black image
return images, has_nsfw_concepts
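# A minimal usage sketch (the hub id, subfolder layout, and variable names below
# are assumptions for illustration, not part of this file):
#
#   import numpy as np
#   from transformers import CLIPFeatureExtractor
#
#   checker = StableDiffusionSafetyChecker.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", subfolder="safety_checker"
#   )
#   extractor = CLIPFeatureExtractor.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", subfolder="feature_extractor"
#   )
#   clip_input = extractor(pil_images, return_tensors="pt").pixel_values
#   np_images = np.stack([np.array(img) / 255.0 for img in pil_images])
#   checked_images, has_nsfw = checker(images=np_images, clip_input=clip_input)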
# --- end of diffusers-main :: src/diffusers/pipelines/stable_diffusion/safety_checker.py ---
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_flax_available, is_onnx_available, is_torch_available, is_transformers_available
@dataclass
class StableDiffusionPipelineOutput(BaseOutput):
"""
Output class for Stable Diffusion pipelines.
Args:
images (`List[PIL.Image.Image]` or `np.ndarray`)
List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width,
num_channels)`. The PIL images or numpy array represent the denoised images of the diffusion pipeline.
nsfw_content_detected (`List[bool]`)
List of flags denoting whether the corresponding generated image likely represents "not-safe-for-work"
(nsfw) content, or `None` if safety checking could not be performed.
"""
images: Union[List[PIL.Image.Image], np.ndarray]
nsfw_content_detected: Optional[List[bool]]
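# Typical access pattern for this output (a sketch; `output` is what a pipeline call returns):
#   output.images[0]              # first generated image
#   output.nsfw_content_detected  # per-image flags, or None if the safety checker was disabled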
if is_transformers_available() and is_torch_available():
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .safety_checker import StableDiffusionSafetyChecker
if is_transformers_available() and is_onnx_available():
from .pipeline_stable_diffusion_onnx import StableDiffusionOnnxPipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class FlaxStableDiffusionPipelineOutput(BaseOutput):
"""
Output class for Stable Diffusion pipelines.
Args:
images (`List[PIL.Image.Image]` or `np.ndarray`)
List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width,
num_channels)`. The PIL images or numpy array represent the denoised images of the diffusion pipeline.
nsfw_content_detected (`List[bool]`)
List of flags denoting whether the corresponding generated image likely represents "not-safe-for-work"
(nsfw) content.
"""
images: Union[List[PIL.Image.Image], np.ndarray]
nsfw_content_detected: List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
# --- end of diffusers-main :: src/diffusers/pipelines/stable_diffusion/__init__.py ---
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
from ...configuration_utils import FrozenDict
from ...models import AutoencoderKL, UNet2DConditionModel
from ...pipeline_utils import DiffusionPipeline
from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from ...utils import deprecate, logging
from . import StableDiffusionPipelineOutput
from .safety_checker import StableDiffusionSafetyChecker
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class StableDiffusionPipeline(DiffusionPipeline):
r"""
Pipeline for text-to-image generation using Stable Diffusion.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
text_encoder ([`CLIPTextModel`]):
Frozen text-encoder. Stable Diffusion uses the text portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
tokenizer (`CLIPTokenizer`):
Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
safety_checker ([`StableDiffusionSafetyChecker`]):
Classification module that estimates whether generated images could be considered offensive or harmful.
Please refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
feature_extractor ([`CLIPFeatureExtractor`]):
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
"""
def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
tokenizer: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
safety_checker: StableDiffusionSafetyChecker,
feature_extractor: CLIPFeatureExtractor,
):
super().__init__()
if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
deprecation_message = (
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
new_config = dict(scheduler.config)
new_config["steps_offset"] = 1
scheduler._internal_dict = FrozenDict(new_config)
if safety_checker is None:
logger.warning(
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
" that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling"
" it only for use cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
)
self.register_modules(
vae=vae,
text_encoder=text_encoder,
tokenizer=tokenizer,
unet=unet,
scheduler=scheduler,
safety_checker=safety_checker,
feature_extractor=feature_extractor,
)
def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
r"""
Enable sliced attention computation.
When this option is enabled, the attention module will split the input tensor in slices, to compute attention
in several steps. This is useful to save some memory in exchange for a small speed decrease.
Args:
slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
`attention_head_dim` must be a multiple of `slice_size`.
"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(slice_size)
def disable_attention_slicing(self):
r"""
Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
back to computing attention in one step.
"""
# set slice_size = `None` to disable `attention slicing`
self.enable_attention_slicing(None)
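# Usage sketch (assuming `pipe` is an instantiated pipeline): calling
# `pipe.enable_attention_slicing()` before generation trades a small amount of
# speed for lower peak memory; `pipe.disable_attention_slicing()` restores
# single-step attention.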
@torch.no_grad()
def __call__(
self,
prompt: Union[str, List[str]],
height: int = 512,
width: int = 512,
num_inference_steps: int = 50,
guidance_scale: float = 7.5,
negative_prompt: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[torch.Generator] = None,
latents: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: Optional[int] = 1,
**kwargs,
):
r"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`):
The prompt or prompts to guide the image generation.
height (`int`, *optional*, defaults to 512):
The height in pixels of the generated image.
width (`int`, *optional*, defaults to 512):
The width in pixels of the generated image.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
guidance_scale (`float`, *optional*, defaults to 7.5):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. A higher guidance scale encourages the model to generate images closely linked to the text `prompt`,
usually at the expense of lower image quality.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
if `guidance_scale` is less than `1`).
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
[`schedulers.DDIMScheduler`]; it will be ignored for other schedulers.
generator (`torch.Generator`, *optional*):
A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
deterministic.
latents (`torch.FloatTensor`, *optional*):
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor will be generated by sampling using the supplied random `generator`.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
plain tuple.
callback (`Callable`, *optional*):
A function that will be called every `callback_steps` steps during inference. The function will be
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function will be called. If not specified, the callback will be
called at every step.
Returns:
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
When returning a tuple, the first element is a list with the generated images, and the second element is a
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
(nsfw) content, according to the `safety_checker`.
"""
if isinstance(prompt, str):
batch_size = 1
elif isinstance(prompt, list):
batch_size = len(prompt)
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
# get prompt text embeddings
text_inputs = self.tokenizer(
prompt,
padding="max_length",
max_length=self.tokenizer.model_max_length,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
)
text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
bs_embed, seq_len, _ = text_embeddings.shape
text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
uncond_tokens: List[str]
if negative_prompt is None:
uncond_tokens = [""]
elif type(prompt) is not type(negative_prompt):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
f" {type(prompt)}."
)
elif isinstance(negative_prompt, str):
uncond_tokens = [negative_prompt]
elif batch_size != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`."
)
else:
uncond_tokens = negative_prompt
max_length = text_input_ids.shape[-1]
uncond_input = self.tokenizer(
uncond_tokens,
padding="max_length",
max_length=max_length,
truncation=True,
return_tensors="pt",
)
uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
seq_len = uncond_embeddings.shape[1]
uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
latents_shape = (batch_size * num_images_per_prompt, self.unet.in_channels, height // 8, width // 8)
latents_dtype = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
self.device
)
else:
latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
latents = latents.to(self.device)
# set timesteps
self.scheduler.set_timesteps(num_inference_steps)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
timesteps_tensor = self.scheduler.timesteps.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
for i, t in enumerate(self.progress_bar(timesteps_tensor)):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(i, t, latents)
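# scale the latents back by the Stable Diffusion VAE scaling factor (0.18215) before decoding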
latents = 1 / 0.18215 * latents
image = self.vae.decode(latents).sample
image = (image / 2 + 0.5).clamp(0, 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
if self.safety_checker is not None:
safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
self.device
)
image, has_nsfw_concept = self.safety_checker(
images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
)
else:
has_nsfw_concept = None
if output_type == "pil":
image = self.numpy_to_pil(image)
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
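# A minimal end-to-end usage sketch (hub id and device are assumptions for illustration):
#
#   pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
#   pipe = pipe.to("cuda")
#   output = pipe("a photograph of an astronaut riding a horse")
#   output.images[0].save("astronaut.png")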
# --- end of diffusers-main :: src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py ---
import inspect
from typing import Callable, List, Optional, Union
import numpy as np
import torch
import PIL
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
from ...configuration_utils import FrozenDict
from ...models import AutoencoderKL, UNet2DConditionModel
from ...pipeline_utils import DiffusionPipeline
from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from ...utils import deprecate, logging
from . import StableDiffusionPipelineOutput
from .safety_checker import StableDiffusionSafetyChecker
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
def preprocess(image):
w, h = image.size
w, h = map(lambda x: x - x % 32, (w, h)) # resize to integer multiple of 32
image = image.resize((w, h), resample=PIL.Image.LANCZOS)
image = np.array(image).astype(np.float32) / 255.0
image = image[None].transpose(0, 3, 1, 2)
image = torch.from_numpy(image)
return 2.0 * image - 1.0
class StableDiffusionImg2ImgPipeline(DiffusionPipeline):
r"""
Pipeline for text-guided image to image generation using Stable Diffusion.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
text_encoder ([`CLIPTextModel`]):
Frozen text-encoder. Stable Diffusion uses the text portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
tokenizer (`CLIPTokenizer`):
Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
safety_checker ([`StableDiffusionSafetyChecker`]):
Classification module that estimates whether generated images could be considered offensive or harmful.
Please refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
feature_extractor ([`CLIPFeatureExtractor`]):
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
"""
def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
tokenizer: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
safety_checker: StableDiffusionSafetyChecker,
feature_extractor: CLIPFeatureExtractor,
):
super().__init__()
if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
deprecation_message = (
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
new_config = dict(scheduler.config)
new_config["steps_offset"] = 1
scheduler._internal_dict = FrozenDict(new_config)
if safety_checker is None:
logger.warning(
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
" that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling"
" it only for use cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
)
self.register_modules(
vae=vae,
text_encoder=text_encoder,
tokenizer=tokenizer,
unet=unet,
scheduler=scheduler,
safety_checker=safety_checker,
feature_extractor=feature_extractor,
)
def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
r"""
Enable sliced attention computation.
When this option is enabled, the attention module will split the input tensor in slices, to compute attention
in several steps. This is useful to save some memory in exchange for a small speed decrease.
Args:
slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
`attention_head_dim` must be a multiple of `slice_size`.
"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(slice_size)
def disable_attention_slicing(self):
r"""
Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
back to computing attention in one step.
"""
# set slice_size = `None` to disable `set_attention_slice`
self.enable_attention_slicing(None)
@torch.no_grad()
def __call__(
self,
prompt: Union[str, List[str]],
init_image: Union[torch.FloatTensor, PIL.Image.Image],
strength: float = 0.8,
num_inference_steps: Optional[int] = 50,
guidance_scale: Optional[float] = 7.5,
negative_prompt: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: Optional[float] = 0.0,
generator: Optional[torch.Generator] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: Optional[int] = 1,
**kwargs,
):
r"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`):
The prompt or prompts to guide the image generation.
init_image (`torch.FloatTensor` or `PIL.Image.Image`):
`Image`, or tensor representing an image batch, that will be used as the starting point for the
process.
strength (`float`, *optional*, defaults to 0.8):
Conceptually, indicates how much to transform the reference `init_image`. Must be between 0 and 1.
`init_image` will be used as a starting point, adding more noise to it the larger the `strength`. The
number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
noise will be maximum and the denoising process will run for the full number of iterations specified in
`num_inference_steps`. A value of 1, therefore, essentially ignores `init_image`.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference. This parameter will be modulated by `strength`.
guidance_scale (`float`, *optional*, defaults to 7.5):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. A higher guidance scale encourages the model to generate images closely linked to the text `prompt`,
usually at the expense of lower image quality.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
if `guidance_scale` is less than `1`).
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
[`schedulers.DDIMScheduler`]; it will be ignored for other schedulers.
generator (`torch.Generator`, *optional*):
A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
deterministic.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
plain tuple.
callback (`Callable`, *optional*):
A function that will be called every `callback_steps` steps during inference. The function will be
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function will be called. If not specified, the callback will be
called at every step.
Returns:
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
When returning a tuple, the first element is a list with the generated images, and the second element is a
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
(nsfw) content, according to the `safety_checker`.
"""
if isinstance(prompt, str):
batch_size = 1
elif isinstance(prompt, list):
batch_size = len(prompt)
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if strength < 0 or strength > 1:
raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
# set timesteps
self.scheduler.set_timesteps(num_inference_steps)
if isinstance(init_image, PIL.Image.Image):
init_image = preprocess(init_image)
# get prompt text embeddings
text_inputs = self.tokenizer(
prompt,
padding="max_length",
max_length=self.tokenizer.model_max_length,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
)
text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
# duplicate text embeddings for each generation per prompt
text_embeddings = text_embeddings.repeat_interleave(num_images_per_prompt, dim=0)
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
uncond_tokens: List[str]
if negative_prompt is None:
uncond_tokens = [""]
elif type(prompt) is not type(negative_prompt):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
f" {type(prompt)}."
)
elif isinstance(negative_prompt, str):
uncond_tokens = [negative_prompt]
elif batch_size != len(negative_prompt):
raise ValueError("The length of `negative_prompt` should be equal to batch_size.")
else:
uncond_tokens = negative_prompt
max_length = text_input_ids.shape[-1]
uncond_input = self.tokenizer(
uncond_tokens,
padding="max_length",
max_length=max_length,
truncation=True,
return_tensors="pt",
)
uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt
uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size * num_images_per_prompt, dim=0)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
# encode the init image into latents and scale the latents
latents_dtype = text_embeddings.dtype
init_image = init_image.to(device=self.device, dtype=latents_dtype)
init_latent_dist = self.vae.encode(init_image).latent_dist
init_latents = init_latent_dist.sample(generator=generator)
init_latents = 0.18215 * init_latents
if isinstance(prompt, str):
prompt = [prompt]
if len(prompt) > init_latents.shape[0] and len(prompt) % init_latents.shape[0] == 0:
# expand init_latents for batch_size
deprecation_message = (
f"You have passed {len(prompt)} text prompts (`prompt`), but only {init_latents.shape[0]} initial"
" images (`init_image`). Initial images are now duplicating to match the number of text prompts. Note"
" that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update"
" your script to pass as many init images as text prompts to suppress this warning."
)
deprecate("len(prompt) != len(init_image)", "1.0.0", deprecation_message, standard_warn=False)
additional_image_per_prompt = len(prompt) // init_latents.shape[0]
init_latents = torch.cat([init_latents] * additional_image_per_prompt * num_images_per_prompt, dim=0)
elif len(prompt) > init_latents.shape[0] and len(prompt) % init_latents.shape[0] != 0:
raise ValueError(
f"Cannot duplicate `init_image` of batch size {init_latents.shape[0]} to {len(prompt)} text prompts."
)
else:
init_latents = torch.cat([init_latents] * num_images_per_prompt, dim=0)
# get the original timestep using init_timestep
offset = self.scheduler.config.get("steps_offset", 0)
init_timestep = int(num_inference_steps * strength) + offset
init_timestep = min(init_timestep, num_inference_steps)
timesteps = self.scheduler.timesteps[-init_timestep]
timesteps = torch.tensor([timesteps] * batch_size * num_images_per_prompt, device=self.device)
# add noise to latents using the timesteps
noise = torch.randn(init_latents.shape, generator=generator, device=self.device, dtype=latents_dtype)
init_latents = self.scheduler.add_noise(init_latents, noise, timesteps)
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
latents = init_latents
t_start = max(num_inference_steps - init_timestep + offset, 0)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
timesteps = self.scheduler.timesteps[t_start:].to(self.device)
for i, t in enumerate(self.progress_bar(timesteps)):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(i, t, latents)
latents = 1 / 0.18215 * latents
image = self.vae.decode(latents).sample
image = (image / 2 + 0.5).clamp(0, 1)
image = image.cpu().permute(0, 2, 3, 1).numpy()
if self.safety_checker is not None:
safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
self.device
)
image, has_nsfw_concept = self.safety_checker(
images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
)
else:
has_nsfw_concept = None
if output_type == "pil":
image = self.numpy_to_pil(image)
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
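# A minimal usage sketch (hub id, file names, and device are assumptions for illustration):
#
#   from PIL import Image
#   pipe = StableDiffusionImg2ImgPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")
#   init = Image.open("sketch.png").convert("RGB").resize((512, 512))
#   image = pipe(prompt="a fantasy landscape", init_image=init, strength=0.75).images[0]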
# --- end of diffusers-main :: src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py ---
import inspect
from typing import Callable, List, Optional, Union
import numpy as np
from transformers import CLIPFeatureExtractor, CLIPTokenizer
from ...onnx_utils import OnnxRuntimeModel
from ...pipeline_utils import DiffusionPipeline
from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from ...utils import logging
from . import StableDiffusionPipelineOutput
logger = logging.get_logger(__name__)
class StableDiffusionOnnxPipeline(DiffusionPipeline):
vae_decoder: OnnxRuntimeModel
text_encoder: OnnxRuntimeModel
tokenizer: CLIPTokenizer
unet: OnnxRuntimeModel
scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler]
safety_checker: OnnxRuntimeModel
feature_extractor: CLIPFeatureExtractor
def __init__(
self,
vae_decoder: OnnxRuntimeModel,
text_encoder: OnnxRuntimeModel,
tokenizer: CLIPTokenizer,
unet: OnnxRuntimeModel,
scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
safety_checker: OnnxRuntimeModel,
feature_extractor: CLIPFeatureExtractor,
):
super().__init__()
self.register_modules(
vae_decoder=vae_decoder,
text_encoder=text_encoder,
tokenizer=tokenizer,
unet=unet,
scheduler=scheduler,
safety_checker=safety_checker,
feature_extractor=feature_extractor,
)
def __call__(
self,
prompt: Union[str, List[str]],
height: Optional[int] = 512,
width: Optional[int] = 512,
num_inference_steps: Optional[int] = 50,
guidance_scale: Optional[float] = 7.5,
negative_prompt: Optional[Union[str, List[str]]] = None,
eta: Optional[float] = 0.0,
latents: Optional[np.ndarray] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
callback_steps: Optional[int] = 1,
**kwargs,
):
if isinstance(prompt, str):
batch_size = 1
elif isinstance(prompt, list):
batch_size = len(prompt)
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
# get prompt text embeddings
text_inputs = self.tokenizer(
prompt,
padding="max_length",
max_length=self.tokenizer.model_max_length,
return_tensors="np",
)
text_input_ids = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
)
text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
text_embeddings = self.text_encoder(input_ids=text_input_ids.astype(np.int32))[0]
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
uncond_tokens: List[str]
if negative_prompt is None:
uncond_tokens = [""] * batch_size
elif type(prompt) is not type(negative_prompt):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
f" {type(prompt)}."
)
elif isinstance(negative_prompt, str):
uncond_tokens = [negative_prompt] * batch_size
elif batch_size != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`."
)
else:
uncond_tokens = negative_prompt
max_length = text_input_ids.shape[-1]
uncond_input = self.tokenizer(
uncond_tokens,
padding="max_length",
max_length=max_length,
truncation=True,
return_tensors="np",
)
uncond_embeddings = self.text_encoder(input_ids=uncond_input.input_ids.astype(np.int32))[0]
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
text_embeddings = np.concatenate([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
latents_shape = (batch_size, 4, height // 8, width // 8)
if latents is None:
latents = np.random.randn(*latents_shape).astype(np.float32)
elif latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
# set timesteps
self.scheduler.set_timesteps(num_inference_steps)
latents = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
# expand the latents if we are doing classifier free guidance
latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
noise_pred = self.unet(
sample=latent_model_input, timestep=np.array([t]), encoder_hidden_states=text_embeddings
)
noise_pred = noise_pred[0]
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
latents = np.array(latents)
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(i, t, latents)
latents = 1 / 0.18215 * latents
image = self.vae_decoder(latent_sample=latents)[0]
image = np.clip(image / 2 + 0.5, 0, 1)
image = image.transpose((0, 2, 3, 1))
safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="np")
image, has_nsfw_concept = self.safety_checker(clip_input=safety_checker_input.pixel_values, images=image)
if output_type == "pil":
image = self.numpy_to_pil(image)
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
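# A minimal usage sketch (hub id, revision, and provider are assumptions for illustration):
#
#   pipe = StableDiffusionOnnxPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", revision="onnx", provider="CPUExecutionProvider"
#   )
#   image = pipe("a photo of a corgi").images[0]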
# --- end of diffusers-main :: src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_onnx.py ---
import inspect
from typing import Callable, List, Optional, Union
import numpy as np
import torch
import PIL
from tqdm.auto import tqdm
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
from ...configuration_utils import FrozenDict
from ...models import AutoencoderKL, UNet2DConditionModel
from ...pipeline_utils import DiffusionPipeline
from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from ...utils import deprecate, logging
from . import StableDiffusionPipelineOutput
from .safety_checker import StableDiffusionSafetyChecker
logger = logging.get_logger(__name__)
def preprocess_image(image):
w, h = image.size
w, h = map(lambda x: x - x % 32, (w, h)) # resize to integer multiple of 32
image = image.resize((w, h), resample=PIL.Image.LANCZOS)
image = np.array(image).astype(np.float32) / 255.0
image = image[None].transpose(0, 3, 1, 2)
image = torch.from_numpy(image)
return 2.0 * image - 1.0
def preprocess_mask(mask):
mask = mask.convert("L")
w, h = mask.size
w, h = map(lambda x: x - x % 32, (w, h)) # resize to integer multiple of 32
mask = mask.resize((w // 8, h // 8), resample=PIL.Image.NEAREST)
mask = np.array(mask).astype(np.float32) / 255.0
mask = np.tile(mask, (4, 1, 1))
mask = mask[None].transpose(0, 1, 2, 3)  # add a batch dimension; this transpose is the identity permutation (a no-op)
mask = 1 - mask # repaint white, keep black
mask = torch.from_numpy(mask)
return mask
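# e.g. a 512x512 PIL mask becomes a (1, 4, 64, 64) tensor: downsampled 8x to the
# latent resolution, tiled across the 4 latent channels, and inverted so that
# white (masked) pixels select the region to repaint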
class StableDiffusionInpaintPipeline(DiffusionPipeline):
r"""
Pipeline for text-guided image inpainting using Stable Diffusion. *This is an experimental feature*.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
text_encoder ([`CLIPTextModel`]):
Frozen text-encoder. Stable Diffusion uses the text portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
tokenizer (`CLIPTokenizer`):
Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
safety_checker ([`StableDiffusionSafetyChecker`]):
Classification module that estimates whether generated images could be considered offensive or harmful.
Please refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
feature_extractor ([`CLIPFeatureExtractor`]):
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
"""
def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
tokenizer: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
safety_checker: StableDiffusionSafetyChecker,
feature_extractor: CLIPFeatureExtractor,
):
super().__init__()
logger.info("`StableDiffusionInpaintPipeline` is experimental and will very likely change in the future.")
if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
deprecation_message = (
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
new_config = dict(scheduler.config)
new_config["steps_offset"] = 1
scheduler._internal_dict = FrozenDict(new_config)
if safety_checker is None:
logger.warning(
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
" that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling"
" it only for use cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
)
self.register_modules(
vae=vae,
text_encoder=text_encoder,
tokenizer=tokenizer,
unet=unet,
scheduler=scheduler,
safety_checker=safety_checker,
feature_extractor=feature_extractor,
)
def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
r"""
Enable sliced attention computation.
When this option is enabled, the attention module will split the input tensor in slices, to compute attention
in several steps. This is useful to save some memory in exchange for a small speed decrease.
Args:
slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
`attention_head_dim` must be a multiple of `slice_size`.
"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(slice_size)
def disable_attention_slicing(self):
r"""
Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
back to computing attention in one step.
"""
# set slice_size = `None` to disable `set_attention_slice`
self.enable_attention_slicing(None)
@torch.no_grad()
def __call__(
self,
prompt: Union[str, List[str]],
init_image: Union[torch.FloatTensor, PIL.Image.Image],
mask_image: Union[torch.FloatTensor, PIL.Image.Image],
strength: float = 0.8,
num_inference_steps: Optional[int] = 50,
guidance_scale: Optional[float] = 7.5,
negative_prompt: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: Optional[float] = 0.0,
generator: Optional[torch.Generator] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: Optional[int] = 1,
**kwargs,
):
r"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`):
The prompt or prompts to guide the image generation.
init_image (`torch.FloatTensor` or `PIL.Image.Image`):
`Image`, or tensor representing an image batch, that will be used as the starting point for the
process. This is the image whose masked region will be inpainted.
mask_image (`torch.FloatTensor` or `PIL.Image.Image`):
`Image`, or tensor representing an image batch, to mask `init_image`. White pixels in the mask will be
replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
strength (`float`, *optional*, defaults to 0.8):
Conceptually, indicates how much to inpaint the masked area. Must be between 0 and 1. When `strength`
is 1, the denoising process will be run on the masked area for the full number of iterations specified
in `num_inference_steps`. `init_image` will be used as a reference for the masked area, adding more
noise to that region the larger the `strength`. If `strength` is 0, no inpainting will occur.
num_inference_steps (`int`, *optional*, defaults to 50):
The reference number of denoising steps. More denoising steps usually lead to a higher quality image at
the expense of slower inference. This parameter will be modulated by `strength`, as explained above.
guidance_scale (`float`, *optional*, defaults to 7.5):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. A higher guidance scale encourages the model to generate images closely linked to the text `prompt`,
usually at the expense of lower image quality.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
if `guidance_scale` is less than `1`).
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
[`schedulers.DDIMScheduler`]; it will be ignored for other schedulers.
generator (`torch.Generator`, *optional*):
A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
deterministic.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
plain tuple.
callback (`Callable`, *optional*):
A function that will be called every `callback_steps` steps during inference. The function will be
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function will be called. If not specified, the callback will be
called at every step.
Returns:
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
When returning a tuple, the first element is a list with the generated images, and the second element is a
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
(nsfw) content, according to the `safety_checker`.
"""
if isinstance(prompt, str):
batch_size = 1
elif isinstance(prompt, list):
batch_size = len(prompt)
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if strength < 0 or strength > 1:
raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
# set timesteps
self.scheduler.set_timesteps(num_inference_steps)
# get prompt text embeddings
text_inputs = self.tokenizer(
prompt,
padding="max_length",
max_length=self.tokenizer.model_max_length,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
)
text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
# duplicate text embeddings for each generation per prompt
text_embeddings = text_embeddings.repeat_interleave(num_images_per_prompt, dim=0)
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier-free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
uncond_tokens: List[str]
if negative_prompt is None:
uncond_tokens = [""]
elif type(prompt) is not type(negative_prompt):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
f" {type(prompt)}."
)
elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt] * batch_size
elif batch_size != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`."
)
else:
uncond_tokens = negative_prompt
max_length = text_input_ids.shape[-1]
uncond_input = self.tokenizer(
uncond_tokens,
padding="max_length",
max_length=max_length,
truncation=True,
return_tensors="pt",
)
uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(num_images_per_prompt, dim=0)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
# preprocess image
if not isinstance(init_image, torch.FloatTensor):
init_image = preprocess_image(init_image)
# encode the init image into latents and scale the latents
latents_dtype = text_embeddings.dtype
init_image = init_image.to(device=self.device, dtype=latents_dtype)
init_latent_dist = self.vae.encode(init_image).latent_dist
init_latents = init_latent_dist.sample(generator=generator)
init_latents = 0.18215 * init_latents
# Expand init_latents for batch_size and num_images_per_prompt
init_latents = torch.cat([init_latents] * batch_size * num_images_per_prompt, dim=0)
init_latents_orig = init_latents
# preprocess mask
if not isinstance(mask_image, torch.FloatTensor):
mask_image = preprocess_mask(mask_image)
mask_image = mask_image.to(device=self.device, dtype=latents_dtype)
mask = torch.cat([mask_image] * batch_size * num_images_per_prompt)
# check sizes
if not mask.shape == init_latents.shape:
raise ValueError("The mask and init_image should be the same size!")
# get the original timestep using init_timestep
offset = self.scheduler.config.get("steps_offset", 0)
init_timestep = int(num_inference_steps * strength) + offset
init_timestep = min(init_timestep, num_inference_steps)
timesteps = self.scheduler.timesteps[-init_timestep]
timesteps = torch.tensor([timesteps] * batch_size * num_images_per_prompt, device=self.device)
# add noise to latents using the timesteps
noise = torch.randn(init_latents.shape, generator=generator, device=self.device, dtype=latents_dtype)
init_latents = self.scheduler.add_noise(init_latents, noise, timesteps)
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
latents = init_latents
t_start = max(num_inference_steps - init_timestep + offset, 0)
# Some schedulers like PNDM have timesteps as arrays
        # It's more efficient to move all timesteps to the correct device beforehand
timesteps = self.scheduler.timesteps[t_start:].to(self.device)
        for i, t in enumerate(self.progress_bar(timesteps)):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
# masking
init_latents_proper = self.scheduler.add_noise(init_latents_orig, noise, torch.tensor([t]))
latents = (init_latents_proper * mask) + (latents * (1 - mask))
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(i, t, latents)
latents = 1 / 0.18215 * latents
image = self.vae.decode(latents).sample
image = (image / 2 + 0.5).clamp(0, 1)
image = image.cpu().permute(0, 2, 3, 1).numpy()
if self.safety_checker is not None:
safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
self.device
)
image, has_nsfw_concept = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values)
else:
has_nsfw_concept = None
if output_type == "pil":
image = self.numpy_to_pil(image)
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
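# --- Usage sketch (illustrative addition, not part of the original file) ---
# A minimal example of driving the inpainting pipeline defined above. The
# checkpoint id and image URLs below are assumptions chosen for illustration;
# substitute any compatible Stable Diffusion checkpoint and your own images.
if __name__ == "__main__":
    from diffusers.utils import load_image

    pipe = StableDiffusionInpaintPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")
    init_image = load_image("https://example.com/bench.png").resize((512, 512))  # hypothetical URL
    mask_image = load_image("https://example.com/bench_mask.png").resize((512, 512))  # hypothetical URL
    output = pipe(
        prompt="a white cat sitting on a park bench",
        init_image=init_image,
        mask_image=mask_image,
        strength=0.75,
        guidance_scale=7.5,
        num_inference_steps=50,
    )
    output.images[0].save("inpainted.png")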
|
diffusers-main
|
src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from ...schedulers import PNDMScheduler
class PNDMPipeline(DiffusionPipeline):
r"""
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
Parameters:
unet (`UNet2DModel`): U-Net architecture to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
The `PNDMScheduler` to be used in combination with `unet` to denoise the encoded image.
"""
unet: UNet2DModel
scheduler: PNDMScheduler
def __init__(self, unet: UNet2DModel, scheduler: PNDMScheduler):
super().__init__()
self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
def __call__(
self,
batch_size: int = 1,
num_inference_steps: int = 50,
generator: Optional[torch.Generator] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
**kwargs,
) -> Union[ImagePipelineOutput, Tuple]:
r"""
Args:
            batch_size (`int`, *optional*, defaults to 1): The number of images to generate.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            generator (`torch.Generator`, *optional*): A [torch
                generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
                deterministic.
            output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose
                between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a
                [`~pipeline_utils.ImagePipelineOutput`] instead of a plain tuple.
Returns:
            [`~pipeline_utils.ImagePipelineOutput`] or `tuple`: [`~pipeline_utils.ImagePipelineOutput`] if
            `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the
generated images.
"""
# For more information on the sampling method you can take a look at Algorithm 2 of
# the official paper: https://arxiv.org/pdf/2202.09778.pdf
# Sample gaussian noise to begin loop
image = torch.randn(
(batch_size, self.unet.in_channels, self.unet.sample_size, self.unet.sample_size),
generator=generator,
)
image = image.to(self.device)
self.scheduler.set_timesteps(num_inference_steps)
for t in self.progress_bar(self.scheduler.timesteps):
model_output = self.unet(image, t).sample
image = self.scheduler.step(model_output, t, image).prev_sample
image = (image / 2 + 0.5).clamp(0, 1)
image = image.cpu().permute(0, 2, 3, 1).numpy()
if output_type == "pil":
image = self.numpy_to_pil(image)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=image)
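# --- Usage sketch (illustrative addition, not part of the original file) ---
# Wires a PNDMPipeline together from an (untrained) UNet2DModel and a default
# PNDMScheduler, then samples one image. With random weights the output is
# noise-like; load a trained checkpoint via `from_pretrained` for real samples.
if __name__ == "__main__":
    unet = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)
    scheduler = PNDMScheduler()
    pipe = PNDMPipeline(unet=unet, scheduler=scheduler)
    images = pipe(batch_size=1, num_inference_steps=50).images
    images[0].save("pndm_sample.png")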
|
diffusers-main
|
src/diffusers/pipelines/pndm/pipeline_pndm.py
|
# flake8: noqa
from .pipeline_pndm import PNDMPipeline
|
diffusers-main
|
src/diffusers/pipelines/pndm/__init__.py
|
# flake8: noqa
from .pipeline_ddpm import DDPMPipeline
|
diffusers-main
|
src/diffusers/pipelines/ddpm/__init__.py
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDPMPipeline(DiffusionPipeline):
r"""
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
Parameters:
unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
[`DDPMScheduler`], or [`DDIMScheduler`].
"""
def __init__(self, unet, scheduler):
super().__init__()
self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
def __call__(
self,
batch_size: int = 1,
generator: Optional[torch.Generator] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
**kwargs,
) -> Union[ImagePipelineOutput, Tuple]:
r"""
Args:
batch_size (`int`, *optional*, defaults to 1):
The number of images to generate.
generator (`torch.Generator`, *optional*):
A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
deterministic.
output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipeline_utils.ImagePipelineOutput`] instead of a plain tuple.
Returns:
            [`~pipeline_utils.ImagePipelineOutput`] or `tuple`: [`~pipeline_utils.ImagePipelineOutput`] if
            `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the
generated images.
"""
# Sample gaussian noise to begin loop
image = torch.randn(
(batch_size, self.unet.in_channels, self.unet.sample_size, self.unet.sample_size),
generator=generator,
)
image = image.to(self.device)
# set step values
self.scheduler.set_timesteps(1000)
for t in self.progress_bar(self.scheduler.timesteps):
# 1. predict noise model_output
model_output = self.unet(image, t).sample
            # 2. compute previous image: x_t -> x_t-1
image = self.scheduler.step(model_output, t, image, generator=generator).prev_sample
image = (image / 2 + 0.5).clamp(0, 1)
image = image.cpu().permute(0, 2, 3, 1).numpy()
if output_type == "pil":
image = self.numpy_to_pil(image)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=image)
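# --- Usage sketch (illustrative addition, not part of the original file) ---
# Note that __call__ above hardcodes `set_timesteps(1000)`, so every call runs
# the full 1000-step reverse process regardless of scheduler configuration.
# The untrained UNet below is only a placeholder; real use would load weights
# with `DDPMPipeline.from_pretrained`.
if __name__ == "__main__":
    from diffusers import DDPMScheduler, UNet2DModel

    unet = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)
    scheduler = DDPMScheduler(num_train_timesteps=1000)
    pipe = DDPMPipeline(unet=unet, scheduler=scheduler)
    images = pipe(batch_size=1).images
    images[0].save("ddpm_sample.png")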
|
diffusers-main
|
src/diffusers/pipelines/ddpm/pipeline_ddpm.py
|
# coding=utf-8
# Copyright 2020 Optuna, Hugging Face
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Logging utilities."""
import logging
import os
import sys
import threading
from logging import CRITICAL # NOQA
from logging import DEBUG # NOQA
from logging import ERROR # NOQA
from logging import FATAL # NOQA
from logging import INFO # NOQA
from logging import NOTSET # NOQA
from logging import WARN # NOQA
from logging import WARNING # NOQA
from typing import Optional
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
log_levels = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True
def _get_default_logging_level():
"""
    If the DIFFUSERS_VERBOSITY env var is set to one of the valid choices, return that as the new default level. If it
    is not, fall back to `_default_log_level`.
"""
env_level_str = os.getenv("DIFFUSERS_VERBOSITY", None)
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"Unknown option DIFFUSERS_VERBOSITY={env_level_str}, "
f"has to be one of: { ', '.join(log_levels.keys()) }"
)
return _default_log_level
def _get_library_name() -> str:
return __name__.split(".")[0]
def _get_library_root_logger() -> logging.Logger:
return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
_default_handler = logging.StreamHandler() # Set sys.stderr as stream.
_default_handler.flush = sys.stderr.flush
# Apply our default configuration to the library root logger.
library_root_logger = _get_library_root_logger()
library_root_logger.addHandler(_default_handler)
library_root_logger.setLevel(_get_default_logging_level())
library_root_logger.propagate = False
def _reset_library_root_logger() -> None:
global _default_handler
with _lock:
if not _default_handler:
return
library_root_logger = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler)
library_root_logger.setLevel(logging.NOTSET)
_default_handler = None
def get_log_levels_dict():
return log_levels
def get_logger(name: Optional[str] = None) -> logging.Logger:
"""
Return a logger with the specified name.
This function is not supposed to be directly accessed unless you are writing a custom diffusers module.
"""
if name is None:
name = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(name)
def get_verbosity() -> int:
"""
Return the current level for the 🤗 Diffusers' root logger as an int.
Returns:
`int`: The logging level.
<Tip>
    🤗 Diffusers has the following logging levels:
- 50: `diffusers.logging.CRITICAL` or `diffusers.logging.FATAL`
- 40: `diffusers.logging.ERROR`
- 30: `diffusers.logging.WARNING` or `diffusers.logging.WARN`
- 20: `diffusers.logging.INFO`
- 10: `diffusers.logging.DEBUG`
</Tip>"""
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int) -> None:
"""
Set the verbosity level for the 🤗 Diffusers' root logger.
Args:
verbosity (`int`):
Logging level, e.g., one of:
- `diffusers.logging.CRITICAL` or `diffusers.logging.FATAL`
- `diffusers.logging.ERROR`
- `diffusers.logging.WARNING` or `diffusers.logging.WARN`
- `diffusers.logging.INFO`
- `diffusers.logging.DEBUG`
"""
_configure_library_root_logger()
_get_library_root_logger().setLevel(verbosity)
def set_verbosity_info():
"""Set the verbosity to the `INFO` level."""
return set_verbosity(INFO)
def set_verbosity_warning():
"""Set the verbosity to the `WARNING` level."""
return set_verbosity(WARNING)
def set_verbosity_debug():
"""Set the verbosity to the `DEBUG` level."""
return set_verbosity(DEBUG)
def set_verbosity_error():
"""Set the verbosity to the `ERROR` level."""
return set_verbosity(ERROR)
def disable_default_handler() -> None:
"""Disable the default handler of the HuggingFace Diffusers' root logger."""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler)
def enable_default_handler() -> None:
"""Enable the default handler of the HuggingFace Diffusers' root logger."""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler)
def add_handler(handler: logging.Handler) -> None:
"""adds a handler to the HuggingFace Diffusers' root logger."""
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(handler)
def remove_handler(handler: logging.Handler) -> None:
"""removes given handler from the HuggingFace Diffusers' root logger."""
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(handler)
def disable_propagation() -> None:
"""
Disable propagation of the library log outputs. Note that log propagation is disabled by default.
"""
_configure_library_root_logger()
_get_library_root_logger().propagate = False
def enable_propagation() -> None:
"""
Enable propagation of the library log outputs. Please disable the HuggingFace Diffusers' default handler to prevent
double logging if the root logger has been configured.
"""
_configure_library_root_logger()
_get_library_root_logger().propagate = True
def enable_explicit_format() -> None:
"""
Enable explicit formatting for every HuggingFace Diffusers' logger. The explicit formatter is as follows:
```
[LEVELNAME|FILENAME|LINE NUMBER] TIME >> MESSAGE
```
All handlers currently bound to the root logger are affected by this method.
"""
handlers = _get_library_root_logger().handlers
for handler in handlers:
formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
handler.setFormatter(formatter)
def reset_format() -> None:
"""
Resets the formatting for HuggingFace Diffusers' loggers.
All handlers currently bound to the root logger are affected by this method.
"""
handlers = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(None)
def warning_advice(self, *args, **kwargs):
"""
This method is identical to `logger.warning()`, but if env var DIFFUSERS_NO_ADVISORY_WARNINGS=1 is set, this
warning will not be printed
"""
no_advisory_warnings = os.getenv("DIFFUSERS_NO_ADVISORY_WARNINGS", False)
if no_advisory_warnings:
return
self.warning(*args, **kwargs)
logging.Logger.warning_advice = warning_advice
class EmptyTqdm:
"""Dummy tqdm which doesn't do anything."""
def __init__(self, *args, **kwargs): # pylint: disable=unused-argument
self._iterator = args[0] if args else None
def __iter__(self):
return iter(self._iterator)
def __getattr__(self, _):
"""Return empty function."""
def empty_fn(*args, **kwargs): # pylint: disable=unused-argument
return
return empty_fn
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
return
class _tqdm_cls:
def __call__(self, *args, **kwargs):
if _tqdm_active:
return tqdm_lib.tqdm(*args, **kwargs)
else:
return EmptyTqdm(*args, **kwargs)
def set_lock(self, *args, **kwargs):
self._lock = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*args, **kwargs)
def get_lock(self):
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
"""Return a boolean indicating whether tqdm progress bars are enabled."""
global _tqdm_active
return bool(_tqdm_active)
def enable_progress_bar():
"""Enable tqdm progress bar."""
global _tqdm_active
_tqdm_active = True
def disable_progress_bar():
"""Disable tqdm progress bar."""
global _tqdm_active
_tqdm_active = False
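# --- Usage sketch (illustrative addition, not part of the original file) ---
# Typical ways downstream code interacts with this module.
if __name__ == "__main__":
    logger = get_logger(__name__)  # a child of the library root logger
    set_verbosity_info()           # same effect as DIFFUSERS_VERBOSITY=info
    logger.info("now visible at INFO level")
    enable_explicit_format()       # [LEVEL|file:line] time >> message
    logger.warning("explicitly formatted warning")
    reset_format()
    disable_progress_bar()         # tqdm(...) below becomes a no-op EmptyTqdm
    for _ in tqdm(range(3)):
        pass
    enable_progress_bar()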
|
diffusers-main
|
src/diffusers/utils/logging.py
|
# This file is autogenerated by the command `make fix-copies`, do not edit.
# flake8: noqa
from ..utils import DummyObject, requires_backends
class StableDiffusionOnnxPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers", "onnx"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers", "onnx"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers", "onnx"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers", "onnx"])
|
diffusers-main
|
src/diffusers/utils/dummy_torch_and_transformers_and_onnx_objects.py
|
# This file is autogenerated by the command `make fix-copies`, do not edit.
# flake8: noqa
from ..utils import DummyObject, requires_backends
class ModelMixin(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class AutoencoderKL(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class UNet2DConditionModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class UNet2DModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class VQModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
def get_constant_schedule(*args, **kwargs):
requires_backends(get_constant_schedule, ["torch"])
def get_constant_schedule_with_warmup(*args, **kwargs):
requires_backends(get_constant_schedule_with_warmup, ["torch"])
def get_cosine_schedule_with_warmup(*args, **kwargs):
requires_backends(get_cosine_schedule_with_warmup, ["torch"])
def get_cosine_with_hard_restarts_schedule_with_warmup(*args, **kwargs):
requires_backends(get_cosine_with_hard_restarts_schedule_with_warmup, ["torch"])
def get_linear_schedule_with_warmup(*args, **kwargs):
requires_backends(get_linear_schedule_with_warmup, ["torch"])
def get_polynomial_decay_schedule_with_warmup(*args, **kwargs):
requires_backends(get_polynomial_decay_schedule_with_warmup, ["torch"])
def get_scheduler(*args, **kwargs):
requires_backends(get_scheduler, ["torch"])
class DiffusionPipeline(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class DDIMPipeline(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class DDPMPipeline(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class KarrasVePipeline(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class LDMPipeline(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class PNDMPipeline(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class ScoreSdeVePipeline(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class DDIMScheduler(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class DDPMScheduler(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class KarrasVeScheduler(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class PNDMScheduler(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class SchedulerMixin(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class ScoreSdeVeScheduler(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class EMAModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
|
diffusers-main
|
src/diffusers/utils/dummy_pt_objects.py
|
import inspect
import os
import random
import re
import unittest
from distutils.util import strtobool
from pathlib import Path
from typing import Union
import PIL.Image
import PIL.ImageOps
import requests
from packaging import version
from .import_utils import is_flax_available, is_torch_available
global_rng = random.Random()
if is_torch_available():
import torch
torch_device = "cuda" if torch.cuda.is_available() else "cpu"
is_torch_higher_equal_than_1_12 = version.parse(version.parse(torch.__version__).base_version) >= version.parse(
"1.12"
)
if is_torch_higher_equal_than_1_12:
torch_device = "mps" if torch.backends.mps.is_available() else torch_device
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
        The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
        joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def floats_tensor(shape, scale=1.0, rng=None, name=None):
"""Creates a random float32 tensor"""
if rng is None:
rng = global_rng
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims):
values.append(rng.random() * scale)
return torch.tensor(data=values, dtype=torch.float).view(shape).contiguous()
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def load_image(image: Union[str, PIL.Image.Image]) -> PIL.Image.Image:
"""
Args:
Loads `image` to a PIL Image.
image (`str` or `PIL.Image.Image`):
The image to convert to the PIL Image format.
Returns:
`PIL.Image.Image`: A PIL Image.
"""
if isinstance(image, str):
if image.startswith("http://") or image.startswith("https://"):
image = PIL.Image.open(requests.get(image, stream=True).raw)
elif os.path.isfile(image):
image = PIL.Image.open(image)
else:
raise ValueError(
f"Incorrect path or url, URLs must start with `http://` or `https://`, and {image} is not a valid path"
)
elif isinstance(image, PIL.Image.Image):
image = image
else:
raise ValueError(
"Incorrect format used for image. Should be an url linking to an image, a local path, or a PIL image."
)
image = PIL.ImageOps.exif_transpose(image)
image = image.convert("RGB")
return image
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
    NB: this function taps into a private _pytest API and, while unlikely, it could break should
    pytest make internal changes - it also calls default internal methods of terminalreporter which
    can be hijacked by various `pytest-` plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = "reports"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{id}_{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
tr.reportchars = "wPpsxXEf" # emulate -rA (used in summary_passes() and short_test_summary())
with open(report_files["passes"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
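# --- Usage sketch (illustrative addition, not part of the original file) ---
# The two helpers most tests reach for; both require torch to be installed.
if __name__ == "__main__":
    # A seeded RNG makes the random tensor reproducible across runs.
    sample = floats_tensor((2, 3, 8, 8), scale=0.5, rng=random.Random(0))
    print(sample.shape, sample.dtype)  # torch.Size([2, 3, 8, 8]) torch.float32

    # load_image passes an existing PIL image through, applying EXIF
    # transposition and RGB conversion on the way.
    image = load_image(PIL.Image.new("RGB", (16, 16)))
    print(image.size, image.mode)  # (16, 16) RGB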
|
diffusers-main
|
src/diffusers/utils/testing_utils.py
|
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from .deprecation_utils import deprecate
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
is_accelerate_available,
is_flax_available,
is_inflect_available,
is_modelcards_available,
is_onnx_available,
is_scipy_available,
is_tf_available,
is_torch_available,
is_transformers_available,
is_unidecode_available,
requires_backends,
)
from .logging import get_logger
from .outputs import BaseOutput
if is_torch_available():
from .testing_utils import floats_tensor, load_image, parse_flag_from_env, slow, torch_device
logger = get_logger(__name__)
hf_cache_home = os.path.expanduser(
os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
default_cache_path = os.path.join(hf_cache_home, "diffusers")
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
|
diffusers-main
|
src/diffusers/utils/__init__.py
|
# This file is autogenerated by the command `make fix-copies`, do not edit.
# flake8: noqa
from ..utils import DummyObject, requires_backends
class LDMTextToImagePipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionImg2ImgPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionInpaintPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
|
diffusers-main
|
src/diffusers/utils/dummy_torch_and_transformers_objects.py
|
# This file is autogenerated by the command `make fix-copies`, do not edit.
# flake8: noqa
from ..utils import DummyObject, requires_backends
class FlaxStableDiffusionPipeline(metaclass=DummyObject):
_backends = ["flax", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["flax", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax", "transformers"])
|
diffusers-main
|
src/diffusers/utils/dummy_flax_and_transformers_objects.py
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Import utilities: Utilities related to imports and our lazy inits.
"""
import importlib.util
import os
import sys
from collections import OrderedDict
from packaging import version
from . import logging
# The package importlib_metadata is in a different place, depending on the python version.
if sys.version_info < (3, 8):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"}
ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"})
USE_TF = os.environ.get("USE_TF", "AUTO").upper()
USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper()
USE_JAX = os.environ.get("USE_FLAX", "AUTO").upper()
_torch_version = "N/A"
if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES:
_torch_available = importlib.util.find_spec("torch") is not None
if _torch_available:
try:
_torch_version = importlib_metadata.version("torch")
logger.info(f"PyTorch version {_torch_version} available.")
except importlib_metadata.PackageNotFoundError:
_torch_available = False
else:
logger.info("Disabling PyTorch because USE_TF is set")
_torch_available = False
_tf_version = "N/A"
if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES:
_tf_available = importlib.util.find_spec("tensorflow") is not None
if _tf_available:
candidates = (
"tensorflow",
"tensorflow-cpu",
"tensorflow-gpu",
"tf-nightly",
"tf-nightly-cpu",
"tf-nightly-gpu",
"intel-tensorflow",
"intel-tensorflow-avx512",
"tensorflow-rocm",
"tensorflow-macos",
"tensorflow-aarch64",
)
_tf_version = None
# For the metadata, we have to look for both tensorflow and tensorflow-cpu
for pkg in candidates:
try:
_tf_version = importlib_metadata.version(pkg)
break
except importlib_metadata.PackageNotFoundError:
pass
_tf_available = _tf_version is not None
if _tf_available:
if version.parse(_tf_version) < version.parse("2"):
logger.info(f"TensorFlow found but with version {_tf_version}. Diffusers requires version 2 minimum.")
_tf_available = False
else:
logger.info(f"TensorFlow version {_tf_version} available.")
else:
logger.info("Disabling Tensorflow because USE_TORCH is set")
_tf_available = False
if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES:
_flax_available = importlib.util.find_spec("jax") is not None and importlib.util.find_spec("flax") is not None
if _flax_available:
try:
_jax_version = importlib_metadata.version("jax")
_flax_version = importlib_metadata.version("flax")
logger.info(f"JAX version {_jax_version}, Flax version {_flax_version} available.")
except importlib_metadata.PackageNotFoundError:
_flax_available = False
else:
_flax_available = False
_transformers_available = importlib.util.find_spec("transformers") is not None
try:
_transformers_version = importlib_metadata.version("transformers")
logger.debug(f"Successfully imported transformers version {_transformers_version}")
except importlib_metadata.PackageNotFoundError:
_transformers_available = False
_inflect_available = importlib.util.find_spec("inflect") is not None
try:
_inflect_version = importlib_metadata.version("inflect")
logger.debug(f"Successfully imported inflect version {_inflect_version}")
except importlib_metadata.PackageNotFoundError:
_inflect_available = False
_unidecode_available = importlib.util.find_spec("unidecode") is not None
try:
_unidecode_version = importlib_metadata.version("unidecode")
logger.debug(f"Successfully imported unidecode version {_unidecode_version}")
except importlib_metadata.PackageNotFoundError:
_unidecode_available = False
_modelcards_available = importlib.util.find_spec("modelcards") is not None
try:
_modelcards_version = importlib_metadata.version("modelcards")
logger.debug(f"Successfully imported modelcards version {_modelcards_version}")
except importlib_metadata.PackageNotFoundError:
_modelcards_available = False
_onnx_available = importlib.util.find_spec("onnxruntime") is not None
if _onnx_available:
candidates = ("onnxruntime", "onnxruntime-gpu", "onnxruntime-directml", "onnxruntime-openvino")
_onnxruntime_version = None
# For the metadata, we have to look for both onnxruntime and onnxruntime-gpu
for pkg in candidates:
try:
_onnxruntime_version = importlib_metadata.version(pkg)
break
except importlib_metadata.PackageNotFoundError:
pass
_onnx_available = _onnxruntime_version is not None
if _onnx_available:
logger.debug(f"Successfully imported onnxruntime version {_onnxruntime_version}")
_scipy_available = importlib.util.find_spec("scipy") is not None
try:
_scipy_version = importlib_metadata.version("scipy")
logger.debug(f"Successfully imported transformers version {_scipy_version}")
except importlib_metadata.PackageNotFoundError:
_scipy_available = False
_accelerate_available = importlib.util.find_spec("accelerate") is not None
try:
_accelerate_version = importlib_metadata.version("accelerate")
logger.debug(f"Successfully imported accelerate version {_accelerate_version}")
except importlib_metadata.PackageNotFoundError:
_accelerate_available = False
def is_torch_available():
return _torch_available
def is_tf_available():
return _tf_available
def is_flax_available():
return _flax_available
def is_transformers_available():
return _transformers_available
def is_inflect_available():
return _inflect_available
def is_unidecode_available():
return _unidecode_available
def is_modelcards_available():
return _modelcards_available
def is_onnx_available():
return _onnx_available
def is_scipy_available():
return _scipy_available
def is_accelerate_available():
return _accelerate_available
# docstyle-ignore
FLAX_IMPORT_ERROR = """
{0} requires the FLAX library but it was not found in your environment. Check out the instructions on the
installation page: https://github.com/google/flax and follow the ones that match your environment.
"""
# docstyle-ignore
INFLECT_IMPORT_ERROR = """
{0} requires the inflect library but it was not found in your environment. You can install it with pip: `pip install
inflect`
"""
# docstyle-ignore
PYTORCH_IMPORT_ERROR = """
{0} requires the PyTorch library but it was not found in your environment. Check out the instructions on the
installation page: https://pytorch.org/get-started/locally/ and follow the ones that match your environment.
"""
# docstyle-ignore
ONNX_IMPORT_ERROR = """
{0} requires the onnxruntime library but it was not found in your environment. You can install it with pip: `pip
install onnxruntime`
"""
# docstyle-ignore
SCIPY_IMPORT_ERROR = """
{0} requires the scipy library but it was not found in your environment. You can install it with pip: `pip install
scipy`
"""
# docstyle-ignore
TENSORFLOW_IMPORT_ERROR = """
{0} requires the TensorFlow library but it was not found in your environment. Check out the instructions on the
installation page: https://www.tensorflow.org/install and follow the ones that match your environment.
"""
# docstyle-ignore
TRANSFORMERS_IMPORT_ERROR = """
{0} requires the transformers library but it was not found in your environment. You can install it with pip: `pip
install transformers`
"""
# docstyle-ignore
UNIDECODE_IMPORT_ERROR = """
{0} requires the unidecode library but it was not found in your environment. You can install it with pip: `pip install
Unidecode`
"""
BACKENDS_MAPPING = OrderedDict(
[
("flax", (is_flax_available, FLAX_IMPORT_ERROR)),
("inflect", (is_inflect_available, INFLECT_IMPORT_ERROR)),
("onnx", (is_onnx_available, ONNX_IMPORT_ERROR)),
("scipy", (is_scipy_available, SCIPY_IMPORT_ERROR)),
("tf", (is_tf_available, TENSORFLOW_IMPORT_ERROR)),
("torch", (is_torch_available, PYTORCH_IMPORT_ERROR)),
("transformers", (is_transformers_available, TRANSFORMERS_IMPORT_ERROR)),
("unidecode", (is_unidecode_available, UNIDECODE_IMPORT_ERROR)),
]
)
def requires_backends(obj, backends):
if not isinstance(backends, (list, tuple)):
backends = [backends]
name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
checks = (BACKENDS_MAPPING[backend] for backend in backends)
failed = [msg.format(name) for available, msg in checks if not available()]
if failed:
raise ImportError("".join(failed))
class DummyObject(type):
"""
    Metaclass for the dummy objects. Any class inheriting from it will return the ImportError generated by
    `requires_backends` each time a user tries to access any method of that class.
"""
def __getattr__(cls, key):
if key.startswith("_"):
return super().__getattr__(cls, key)
requires_backends(cls, cls._backends)
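# --- Usage sketch (illustrative addition, not part of the original file) ---
# requires_backends raises one combined ImportError listing every missing
# backend; DummyObject wires that check into attribute access, so placeholder
# classes fail lazily on first use rather than at import time. The class name
# below is hypothetical.
if __name__ == "__main__":
    class FakeScipyScheduler(metaclass=DummyObject):
        _backends = ["scipy"]

    try:
        FakeScipyScheduler.step  # attribute access triggers requires_backends
    except ImportError as err:
        print(err)  # raised only when scipy is not installed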
|
diffusers-main
|
src/diffusers/utils/import_utils.py
|
# This file is autogenerated by the command `make fix-copies`, do not edit.
# flake8: noqa
from ..utils import DummyObject, requires_backends
class LMSDiscreteScheduler(metaclass=DummyObject):
_backends = ["torch", "scipy"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "scipy"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "scipy"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "scipy"])
|
diffusers-main
|
src/diffusers/utils/dummy_torch_and_scipy_objects.py
|
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True):
from .. import __version__
deprecated_kwargs = take_from
values = ()
if not isinstance(args[0], tuple):
args = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
raise ValueError(
f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
f" version {__version__} is >= {version_name}"
)
warning = None
if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(attribute),)
warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
elif hasattr(deprecated_kwargs, attribute):
values += (getattr(deprecated_kwargs, attribute),)
warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
elif deprecated_kwargs is None:
warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."
if warning is not None:
warning = warning + " " if standard_warn else ""
warnings.warn(warning + message, DeprecationWarning)
if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
call_frame = inspect.getouterframes(inspect.currentframe())[1]
filename = call_frame.filename
line_number = call_frame.lineno
function = call_frame.function
key, value = next(iter(deprecated_kwargs.items()))
raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")
if len(values) == 0:
return
elif len(values) == 1:
return values[0]
return values
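# --- Usage sketch (illustrative addition, not part of the original file) ---
# deprecate() pops a deprecated kwarg, emits a DeprecationWarning, and hands
# back the old value so callers can keep honoring it until the stated removal
# version. The function and argument names below are hypothetical. (Note that
# deprecate() imports `__version__` from the package, so this only runs when
# the module is imported as part of diffusers.)
if __name__ == "__main__":
    def _deprecation_example(num_steps=50, **kwargs):
        steps = deprecate(
            "steps", "99.0.0", "Please use `num_steps` instead of `steps`.", take_from=kwargs
        )
        if steps is not None:
            num_steps = steps
        return num_steps

    print(_deprecation_example(steps=10))  # warns and returns 10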
|
diffusers-main
|
src/diffusers/utils/deprecation_utils.py
|
# This file is autogenerated by the command `make fix-copies`, do not edit.
# flake8: noqa
from ..utils import DummyObject, requires_backends
class FlaxModelMixin(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxUNet2DConditionModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxAutoencoderKL(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxDiffusionPipeline(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxDDIMScheduler(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxDDPMScheduler(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxKarrasVeScheduler(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxLMSDiscreteScheduler(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxPNDMScheduler(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxSchedulerMixin(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
class FlaxScoreSdeVeScheduler(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["flax"])
|
diffusers-main
|
src/diffusers/utils/dummy_flax_objects.py
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Generic utilities
"""
from collections import OrderedDict
from dataclasses import fields
from typing import Any, Tuple
import numpy as np
from .import_utils import is_torch_available
def is_tensor(x):
"""
Tests if `x` is a `torch.Tensor` or `np.ndarray`.
"""
if is_torch_available():
import torch
if isinstance(x, torch.Tensor):
return True
return isinstance(x, np.ndarray)
class BaseOutput(OrderedDict):
"""
Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like a
tuple) or strings (like a dictionary) that will ignore the `None` attributes. Otherwise behaves like a regular
python dictionary.
<Tip warning={true}>
You can't unpack a `BaseOutput` directly. Use the [`~utils.BaseOutput.to_tuple`] method to convert it to a tuple
    first.
</Tip>
"""
def __post_init__(self):
class_fields = fields(self)
# Safety and consistency checks
if not len(class_fields):
raise ValueError(f"{self.__class__.__name__} has no fields.")
first_field = getattr(self, class_fields[0].name)
other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])
if other_fields_are_none and isinstance(first_field, dict):
for key, value in first_field.items():
self[key] = value
else:
for field in class_fields:
v = getattr(self, field.name)
if v is not None:
self[field.name] = v
def __delitem__(self, *args, **kwargs):
raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")
def setdefault(self, *args, **kwargs):
raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")
def pop(self, *args, **kwargs):
raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")
def update(self, *args, **kwargs):
raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")
def __getitem__(self, k):
if isinstance(k, str):
inner_dict = {k: v for (k, v) in self.items()}
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__(self, name, value):
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(name, value)
super().__setattr__(name, value)
def __setitem__(self, key, value):
# Will raise a KeyException if needed
super().__setitem__(key, value)
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(key, value)
def to_tuple(self) -> Tuple[Any]:
"""
Convert self to a tuple containing all the attributes/keys that are not `None`.
"""
return tuple(self[k] for k in self.keys())
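# --- Illustrative usage sketch (not part of outputs.py) ---
# A minimal, hypothetical BaseOutput subclass showing the dict/tuple access
# semantics documented above; `ExampleOutput` is not a real diffusers class.
from dataclasses import dataclass
from typing import Optional
import torch

@dataclass
class ExampleOutput(BaseOutput):
    sample: torch.FloatTensor
    extra: Optional[torch.FloatTensor] = None

out = ExampleOutput(sample=torch.zeros(2, 3))
assert out["sample"] is out.sample  # string key, like a dict
assert out[0] is out.sample         # integer index, like a tuple
assert len(out.to_tuple()) == 1     # `extra` is None, so it is dropped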
|
diffusers-main
|
src/diffusers/utils/outputs.py
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Optional
import torch
import torch.nn.functional as F
from torch import nn
from einops import rearrange
# TODO(danfu): upstream this to FlashAttention
def check_cuda():
if not torch.cuda.is_available():
raise RuntimeError('CUDA is not available')
cur_device = torch.cuda.current_device()
dprops = torch.cuda.get_device_properties(cur_device)
is_sm75 = dprops.major == 7 and dprops.minor == 5
is_sm8x = dprops.major == 8 and dprops.minor >= 0
return is_sm8x or is_sm75
try:
from flash_attn.flash_attn_interface import flash_attn_unpadded_kvpacked_func
from flash_attn.bert_padding import unpad_input, pad_input
from flash_attn.flash_attention import FlashAttention
flash_attn_installed = check_cuda()
except ImportError:
flash_attn_installed = False
class AttentionBlock(nn.Module):
"""
An attention block that allows spatial positions to attend to each other. Originally ported from here, but adapted
to the N-d case.
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
Uses three q, k, v linear layers to compute attention.
Parameters:
channels (:obj:`int`): The number of channels in the input and output.
num_head_channels (:obj:`int`, *optional*):
The number of channels in each head. If None, then `num_heads` = 1.
num_groups (:obj:`int`, *optional*, defaults to 32): The number of groups to use for group norm.
rescale_output_factor (:obj:`float`, *optional*, defaults to 1.0): The factor to rescale the output by.
eps (:obj:`float`, *optional*, defaults to 1e-5): The epsilon value to use for group norm.
"""
def __init__(
self,
channels: int,
num_head_channels: Optional[int] = None,
num_groups: int = 32,
rescale_output_factor: float = 1.0,
eps: float = 1e-5,
):
super().__init__()
self.channels = channels
self.num_heads = channels // num_head_channels if num_head_channels is not None else 1
self.num_head_size = num_head_channels
self.group_norm = nn.GroupNorm(num_channels=channels, num_groups=num_groups, eps=eps, affine=True)
# define q,k,v as linear layers
self.query = nn.Linear(channels, channels)
self.key = nn.Linear(channels, channels)
self.value = nn.Linear(channels, channels)
self.rescale_output_factor = rescale_output_factor
        self.proj_attn = nn.Linear(channels, channels, bias=True)
def transpose_for_scores(self, projection: torch.Tensor) -> torch.Tensor:
new_projection_shape = projection.size()[:-1] + (self.num_heads, -1)
# move heads to 2nd position (B, T, H * D) -> (B, T, H, D) -> (B, H, T, D)
new_projection = projection.view(new_projection_shape).permute(0, 2, 1, 3)
return new_projection
def forward(self, hidden_states):
residual = hidden_states
batch, channel, height, width = hidden_states.shape
# norm
hidden_states = self.group_norm(hidden_states)
hidden_states = hidden_states.view(batch, channel, height * width).transpose(1, 2)
# proj to q, k, v
query_proj = self.query(hidden_states)
key_proj = self.key(hidden_states)
value_proj = self.value(hidden_states)
# transpose
query_states = self.transpose_for_scores(query_proj)
key_states = self.transpose_for_scores(key_proj)
value_states = self.transpose_for_scores(value_proj)
# get scores
scale = 1 / math.sqrt(math.sqrt(self.channels / self.num_heads))
attention_scores = torch.matmul(query_states * scale, key_states.transpose(-1, -2) * scale) # TODO: use baddmm
attention_probs = torch.softmax(attention_scores.float(), dim=-1).type(attention_scores.dtype)
# compute attention output
hidden_states = torch.matmul(attention_probs, value_states)
hidden_states = hidden_states.permute(0, 2, 1, 3).contiguous()
new_hidden_states_shape = hidden_states.size()[:-2] + (self.channels,)
hidden_states = hidden_states.view(new_hidden_states_shape)
# compute next hidden_states
hidden_states = self.proj_attn(hidden_states)
hidden_states = hidden_states.transpose(-1, -2).reshape(batch, channel, height, width)
# res connect and rescale
hidden_states = (hidden_states + residual) / self.rescale_output_factor
return hidden_states
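# --- Illustrative shape check (not part of attention.py) ---
# AttentionBlock consumes and returns (batch, channels, height, width); the
# sizes below are arbitrary, with channels divisible by num_groups (32).
block = AttentionBlock(channels=64, num_head_channels=32)
x = torch.randn(2, 64, 16, 16)
assert block(x).shape == x.shape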
class SpatialTransformer(nn.Module):
"""
Transformer block for image-like data. First, project the input (aka embedding) and reshape to b, t, d. Then apply
standard transformer action. Finally, reshape to image.
Parameters:
in_channels (:obj:`int`): The number of channels in the input and output.
n_heads (:obj:`int`): The number of heads to use for multi-head attention.
d_head (:obj:`int`): The number of channels in each head.
depth (:obj:`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.
        dropout (:obj:`float`, *optional*, defaults to 0.0): The dropout probability to use.
context_dim (:obj:`int`, *optional*): The number of context dimensions to use.
"""
def __init__(
self,
in_channels: int,
n_heads: int,
d_head: int,
depth: int = 1,
dropout: float = 0.0,
num_groups: int = 32,
context_dim: Optional[int] = None,
):
super().__init__()
self.n_heads = n_heads
self.d_head = d_head
self.in_channels = in_channels
inner_dim = n_heads * d_head
self.norm = torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True)
self.proj_in = nn.Conv2d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0)
self.transformer_blocks = nn.ModuleList(
[
BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim)
for d in range(depth)
]
)
self.proj_out = nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0)
def _set_attention_slice(self, slice_size):
for block in self.transformer_blocks:
block._set_attention_slice(slice_size)
def forward(self, hidden_states, context=None):
# note: if no context is given, cross-attention defaults to self-attention
        batch, channel, height, width = hidden_states.shape
residual = hidden_states
hidden_states = self.norm(hidden_states)
hidden_states = self.proj_in(hidden_states)
inner_dim = hidden_states.shape[1]
        hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim)
for block in self.transformer_blocks:
hidden_states = block(hidden_states, context=context)
        hidden_states = hidden_states.reshape(batch, height, width, inner_dim).permute(0, 3, 1, 2)
hidden_states = self.proj_out(hidden_states)
return hidden_states + residual
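# --- Illustrative usage sketch (not part of attention.py) ---
# SpatialTransformer flattens (B, C, H, W) to (B, H*W, C), runs the transformer
# blocks, and restores the spatial layout; the context shape below is
# hypothetical (e.g. a text-encoder sequence).
st = SpatialTransformer(in_channels=64, n_heads=2, d_head=32, context_dim=768)
x = torch.randn(1, 64, 8, 8)
ctx = torch.randn(1, 77, 768)
assert st(x, context=ctx).shape == x.shape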
class BasicTransformerBlock(nn.Module):
r"""
A basic Transformer block.
Parameters:
dim (:obj:`int`): The number of channels in the input and output.
n_heads (:obj:`int`): The number of heads to use for multi-head attention.
d_head (:obj:`int`): The number of channels in each head.
dropout (:obj:`float`, *optional*, defaults to 0.0): The dropout probability to use.
context_dim (:obj:`int`, *optional*): The size of the context vector for cross attention.
        gated_ff (:obj:`bool`, *optional*, defaults to :obj:`True`): Whether to use a gated feed-forward network.
        checkpoint (:obj:`bool`, *optional*, defaults to :obj:`True`): Whether to use checkpointing.
"""
def __init__(
self,
dim: int,
n_heads: int,
d_head: int,
dropout=0.0,
context_dim: Optional[int] = None,
gated_ff: bool = True,
checkpoint: bool = True,
):
super().__init__()
self.attn1 = CrossAttention(
query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout
) # is a self-attention
self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff)
self.attn2 = CrossAttention(
query_dim=dim, context_dim=context_dim, heads=n_heads, dim_head=d_head, dropout=dropout
) # is self-attn if context is none
self.norm1 = nn.LayerNorm(dim)
self.norm2 = nn.LayerNorm(dim)
self.norm3 = nn.LayerNorm(dim)
self.checkpoint = checkpoint
def _set_attention_slice(self, slice_size):
self.attn1._slice_size = slice_size
self.attn2._slice_size = slice_size
def forward(self, hidden_states, context=None):
hidden_states = hidden_states.contiguous() if hidden_states.device.type == "mps" else hidden_states
hidden_states = self.attn1(self.norm1(hidden_states)) + hidden_states
hidden_states = self.attn2(self.norm2(hidden_states), context=context) + hidden_states
hidden_states = self.ff(self.norm3(hidden_states)) + hidden_states
return hidden_states
class CrossAttention(nn.Module):
r"""
A cross attention layer.
Parameters:
query_dim (:obj:`int`): The number of channels in the query.
context_dim (:obj:`int`, *optional*):
The number of channels in the context. If not given, defaults to `query_dim`.
heads (:obj:`int`, *optional*, defaults to 8): The number of heads to use for multi-head attention.
dim_head (:obj:`int`, *optional*, defaults to 64): The number of channels in each head.
dropout (:obj:`float`, *optional*, defaults to 0.0): The dropout probability to use.
"""
def __init__(
        self, query_dim: int, context_dim: Optional[int] = None, heads: int = 8, dim_head: int = 64, dropout: float = 0.0
):
super().__init__()
inner_dim = dim_head * heads
context_dim = context_dim if context_dim is not None else query_dim
self.scale = dim_head**-0.5
self.heads = heads
# for slice_size > 0 the attention score computation
# is split across the batch axis to save memory
# You can set slice_size with `set_attention_slice`
self._slice_size = None
self.dim_head = dim_head
self.context_dim = context_dim
self.query_dim = query_dim
if self.context_dim == self.query_dim and self.dim_head <= 128 and (self.dim_head % 8) == 0 and flash_attn_installed:
self.flash_attn = FlashAttention(self.scale)
self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
self.to_v = nn.Linear(context_dim, inner_dim, bias=False)
self.to_out = nn.Sequential(nn.Linear(inner_dim, query_dim), nn.Dropout(dropout))
def reshape_heads_to_batch_dim(self, tensor):
batch_size, seq_len, dim = tensor.shape
head_size = self.heads
tensor = tensor.reshape(batch_size, seq_len, head_size, dim // head_size)
tensor = tensor.permute(0, 2, 1, 3).reshape(batch_size * head_size, seq_len, dim // head_size)
return tensor
def reshape_batch_dim_to_heads(self, tensor):
batch_size, seq_len, dim = tensor.shape
head_size = self.heads
tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim)
tensor = tensor.permute(0, 2, 1, 3).reshape(batch_size // head_size, seq_len, dim * head_size)
return tensor
def forward(self, hidden_states, context=None, mask=None):
batch_size, sequence_length, _ = hidden_states.shape
query = self.to_q(hidden_states)
context = context if context is not None else hidden_states
key = self.to_k(context)
value = self.to_v(context)
dim = query.shape[-1]
# TODO(PVP) - mask is currently never used. Remember to re-implement when used
# attention, what we cannot get enough of
if self._slice_size is None or query.shape[0] // self._slice_size == 1:
hidden_states = self._attention(query, key, value)
else:
hidden_states = self._sliced_attention(query, key, value, sequence_length, dim)
return self.to_out(hidden_states)
def _attention(self, query, key, value):
batch_size = query.shape[0]
if not flash_attn_installed or query.dtype == torch.float32 or (self.dim_head > 128 or (self.dim_head % 8) != 0):
query = self.reshape_heads_to_batch_dim(query)
key = self.reshape_heads_to_batch_dim(key)
value = self.reshape_heads_to_batch_dim(value)
# TODO: use baddbmm for better performance
attention_scores = torch.matmul(query, key.transpose(-1, -2)) * self.scale
attention_probs = attention_scores.softmax(dim=-1)
# compute attention output
hidden_states = torch.matmul(attention_probs, value)
# reshape hidden_states
out = self.reshape_batch_dim_to_heads(hidden_states)
elif self.context_dim == self.query_dim:
qkv = torch.stack([
query, key, value
], dim=2)
qkv = rearrange(qkv, 'b s t (h d) -> b s t h d', h=self.heads)
out, _ = self.flash_attn(qkv)
out = rearrange(out, 'b s h d -> b s (h d)', h=self.heads)
else:
h = self.heads
kv = torch.stack([key, value], dim=2)
q_seqlen = query.shape[1]
kv_seqlen = kv.shape[1]
q = rearrange(query, 'b s (h d) -> (b s) h d', h=h)
kv = rearrange(kv, 'b s t (h d) -> (b s) t h d', h=h)
cu_seqlens_q = torch.arange(0, (batch_size + 1) * q_seqlen, step=q_seqlen, dtype=torch.int32, device=q.device)
cu_seqlens_k = torch.arange(0, (batch_size + 1) * kv_seqlen, step=kv_seqlen, dtype=torch.int32, device=kv.device)
out = flash_attn_unpadded_kvpacked_func(q, kv, cu_seqlens_q, cu_seqlens_k, q_seqlen, kv_seqlen, 0.0, self.scale)
out = rearrange(out, '(b s) h d -> b s (h d)', b = batch_size, h = h)
return out
    def _sliced_attention(self, query, key, value, sequence_length, dim):
        # fold heads into the batch dimension before slicing, mirroring `_attention`;
        # the slicing below assumes tensors of shape (batch * heads, seq_len, dim_head)
        query = self.reshape_heads_to_batch_dim(query)
        key = self.reshape_heads_to_batch_dim(key)
        value = self.reshape_heads_to_batch_dim(value)
        batch_size_attention = query.shape[0]
hidden_states = torch.zeros(
(batch_size_attention, sequence_length, dim // self.heads), device=query.device, dtype=query.dtype
)
slice_size = self._slice_size if self._slice_size is not None else hidden_states.shape[0]
for i in range(hidden_states.shape[0] // slice_size):
start_idx = i * slice_size
end_idx = (i + 1) * slice_size
attn_slice = (
torch.matmul(query[start_idx:end_idx], key[start_idx:end_idx].transpose(1, 2)) * self.scale
) # TODO: use baddbmm for better performance
attn_slice = attn_slice.softmax(dim=-1)
attn_slice = torch.matmul(attn_slice, value[start_idx:end_idx])
hidden_states[start_idx:end_idx] = attn_slice
# reshape hidden_states
hidden_states = self.reshape_batch_dim_to_heads(hidden_states)
return hidden_states
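# --- Illustrative usage sketch (not part of attention.py) ---
# With `_slice_size` set, attention is computed in batch chunks to bound the
# peak memory of the score matrix; the numbers below are arbitrary.
attn = CrossAttention(query_dim=64, heads=4, dim_head=16)
attn._slice_size = 2  # normally set via `_set_attention_slice`
x = torch.randn(4, 10, 64)
assert attn(x).shape == x.shape  # self-attention when no context is given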
class FeedForward(nn.Module):
r"""
A feed-forward layer.
Parameters:
dim (:obj:`int`): The number of channels in the input.
dim_out (:obj:`int`, *optional*): The number of channels in the output. If not given, defaults to `dim`.
mult (:obj:`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension.
glu (:obj:`bool`, *optional*, defaults to :obj:`False`): Whether to use GLU activation.
dropout (:obj:`float`, *optional*, defaults to 0.0): The dropout probability to use.
"""
def __init__(
self, dim: int, dim_out: Optional[int] = None, mult: int = 4, glu: bool = False, dropout: float = 0.0
):
super().__init__()
inner_dim = int(dim * mult)
dim_out = dim_out if dim_out is not None else dim
        project_in = GEGLU(dim, inner_dim) if glu else nn.Sequential(nn.Linear(dim, inner_dim), nn.GELU())
self.net = nn.Sequential(project_in, nn.Dropout(dropout), nn.Linear(inner_dim, dim_out))
def forward(self, hidden_states):
return self.net(hidden_states)
# feedforward
class GEGLU(nn.Module):
r"""
A variant of the gated linear unit activation function from https://arxiv.org/abs/2002.05202.
Parameters:
dim_in (:obj:`int`): The number of channels in the input.
dim_out (:obj:`int`): The number of channels in the output.
"""
def __init__(self, dim_in: int, dim_out: int):
super().__init__()
self.proj = nn.Linear(dim_in, dim_out * 2)
def forward(self, hidden_states):
hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
return hidden_states * F.gelu(gate)
|
diffusers-main
|
src/diffusers/models/attention.py
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.utils.checkpoint
from ..configuration_utils import ConfigMixin, register_to_config
from ..modeling_utils import ModelMixin
from ..utils import BaseOutput, logging
from .embeddings import TimestepEmbedding, Timesteps
from .unet_blocks import (
CrossAttnDownBlock2D,
CrossAttnUpBlock2D,
DownBlock2D,
UNetMidBlock2DCrossAttn,
UpBlock2D,
get_down_block,
get_up_block,
)
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class UNet2DConditionOutput(BaseOutput):
"""
Args:
sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Hidden states conditioned on `encoder_hidden_states` input. Output of last layer of model.
"""
sample: torch.FloatTensor
class UNet2DConditionModel(ModelMixin, ConfigMixin):
r"""
UNet2DConditionModel is a conditional 2D UNet model that takes in a noisy sample, conditional state, and a timestep
and returns sample shaped output.
This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library
implements for all the models (such as downloading or saving, etc.)
Parameters:
sample_size (`int`, *optional*): The size of the input sample.
in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample.
out_channels (`int`, *optional*, defaults to 4): The number of channels in the output.
center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample.
        flip_sin_to_cos (`bool`, *optional*, defaults to `True`):
Whether to flip the sin to cos in the time embedding.
freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding.
down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
The tuple of downsample blocks to use.
up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D",)`):
The tuple of upsample blocks to use.
block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
The tuple of output channels for each block.
layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.
downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.
mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.
act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.
norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.
cross_attention_dim (`int`, *optional*, defaults to 1280): The dimension of the cross attention features.
attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.
"""
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
sample_size: Optional[int] = None,
in_channels: int = 4,
out_channels: int = 4,
center_input_sample: bool = False,
flip_sin_to_cos: bool = True,
freq_shift: int = 0,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
),
up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"),
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: int = 32,
norm_eps: float = 1e-5,
cross_attention_dim: int = 1280,
attention_head_dim: int = 8,
):
super().__init__()
self.sample_size = sample_size
time_embed_dim = block_out_channels[0] * 4
# input
self.conv_in = nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))
# time
self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
timestep_input_dim = block_out_channels[0]
self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
self.down_blocks = nn.ModuleList([])
self.mid_block = None
self.up_blocks = nn.ModuleList([])
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
down_block = get_down_block(
down_block_type,
num_layers=layers_per_block,
in_channels=input_channel,
out_channels=output_channel,
temb_channels=time_embed_dim,
add_downsample=not is_final_block,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attention_head_dim,
downsample_padding=downsample_padding,
)
self.down_blocks.append(down_block)
# mid
self.mid_block = UNetMidBlock2DCrossAttn(
in_channels=block_out_channels[-1],
temb_channels=time_embed_dim,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
output_scale_factor=mid_block_scale_factor,
resnet_time_scale_shift="default",
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attention_head_dim,
resnet_groups=norm_num_groups,
)
# count how many layers upsample the images
self.num_upsamplers = 0
# up
reversed_block_out_channels = list(reversed(block_out_channels))
output_channel = reversed_block_out_channels[0]
for i, up_block_type in enumerate(up_block_types):
is_final_block = i == len(block_out_channels) - 1
prev_output_channel = output_channel
output_channel = reversed_block_out_channels[i]
input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
# add upsample block for all BUT final layer
if not is_final_block:
add_upsample = True
self.num_upsamplers += 1
else:
add_upsample = False
up_block = get_up_block(
up_block_type,
num_layers=layers_per_block + 1,
in_channels=input_channel,
out_channels=output_channel,
prev_output_channel=prev_output_channel,
temb_channels=time_embed_dim,
add_upsample=add_upsample,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attention_head_dim,
)
self.up_blocks.append(up_block)
prev_output_channel = output_channel
# out
self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)
self.conv_act = nn.SiLU()
self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)
def set_attention_slice(self, slice_size):
if slice_size is not None and self.config.attention_head_dim % slice_size != 0:
raise ValueError(
f"Make sure slice_size {slice_size} is a divisor of "
f"the number of heads used in cross_attention {self.config.attention_head_dim}"
)
if slice_size is not None and slice_size > self.config.attention_head_dim:
raise ValueError(
f"Chunk_size {slice_size} has to be smaller or equal to "
f"the number of heads used in cross_attention {self.config.attention_head_dim}"
)
for block in self.down_blocks:
if hasattr(block, "attentions") and block.attentions is not None:
block.set_attention_slice(slice_size)
self.mid_block.set_attention_slice(slice_size)
for block in self.up_blocks:
if hasattr(block, "attentions") and block.attentions is not None:
block.set_attention_slice(slice_size)
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, (CrossAttnDownBlock2D, DownBlock2D, CrossAttnUpBlock2D, UpBlock2D)):
module.gradient_checkpointing = value
def forward(
self,
sample: torch.FloatTensor,
timestep: Union[torch.Tensor, float, int],
encoder_hidden_states: torch.Tensor,
return_dict: bool = True,
) -> Union[UNet2DConditionOutput, Tuple]:
r"""
Args:
sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor
timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps
encoder_hidden_states (`torch.FloatTensor`): (batch, channel, height, width) encoder hidden states
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.
Returns:
[`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:
[`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. When
returning a tuple, the first element is the sample tensor.
"""
        # By default samples have to be at least a multiple of the overall upsampling factor.
        # The overall upsampling factor is equal to 2 ** (# num of upsampling layers).
# However, the upsampling interpolation output size can be forced to fit any upsampling size
# on the fly if necessary.
default_overall_up_factor = 2**self.num_upsamplers
# upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
forward_upsample_size = False
upsample_size = None
if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):
logger.info("Forward upsample size to force interpolation output size.")
forward_upsample_size = True
# 0. center input if necessary
if self.config.center_input_sample:
sample = 2 * sample - 1.0
# 1. time
timesteps = timestep
if not torch.is_tensor(timesteps):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
timesteps = timesteps[None].to(sample.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
timesteps = timesteps.expand(sample.shape[0])
t_emb = self.time_proj(timesteps)
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might actually be running in fp16. so we need to cast here.
# there might be better ways to encapsulate this.
t_emb = t_emb.to(dtype=self.dtype)
emb = self.time_embedding(t_emb)
# 2. pre-process
sample = self.conv_in(sample)
# 3. down
down_block_res_samples = (sample,)
for downsample_block in self.down_blocks:
if hasattr(downsample_block, "attentions") and downsample_block.attentions is not None:
sample, res_samples = downsample_block(
hidden_states=sample,
temb=emb,
encoder_hidden_states=encoder_hidden_states,
)
else:
sample, res_samples = downsample_block(hidden_states=sample, temb=emb)
down_block_res_samples += res_samples
# 4. mid
sample = self.mid_block(sample, emb, encoder_hidden_states=encoder_hidden_states)
# 5. up
for i, upsample_block in enumerate(self.up_blocks):
is_final_block = i == len(self.up_blocks) - 1
res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]
# if we have not reached the final block and need to forward the
# upsample size, we do it here
if not is_final_block and forward_upsample_size:
upsample_size = down_block_res_samples[-1].shape[2:]
if hasattr(upsample_block, "attentions") and upsample_block.attentions is not None:
sample = upsample_block(
hidden_states=sample,
temb=emb,
res_hidden_states_tuple=res_samples,
encoder_hidden_states=encoder_hidden_states,
upsample_size=upsample_size,
)
else:
sample = upsample_block(
hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size
)
# 6. post-process
sample = self.conv_norm_out(sample)
sample = self.conv_act(sample)
sample = self.conv_out(sample)
if not return_dict:
return (sample,)
return UNet2DConditionOutput(sample=sample)
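# --- Illustrative smoke test (not part of unet_2d_condition.py) ---
# A deliberately tiny, hypothetical config (not the Stable Diffusion default);
# assumes the standard block implementations from `unet_blocks`.
model = UNet2DConditionModel(
    sample_size=32,
    block_out_channels=(32, 64),
    down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
    up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
    layers_per_block=1,
    cross_attention_dim=32,
    attention_head_dim=8,
)
sample = torch.randn(1, 4, 32, 32)
encoder_hidden_states = torch.randn(1, 77, 32)
out = model(sample, timestep=10, encoder_hidden_states=encoder_hidden_states)
assert out.sample.shape == sample.shape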
|
diffusers-main
|
src/diffusers/models/unet_2d_condition.py
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxSpatialTransformer
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class FlaxCrossAttnDownBlock2D(nn.Module):
r"""
Cross Attention 2D Downsizing block - original architecture from Unet transformers:
https://arxiv.org/abs/2103.06104
Parameters:
in_channels (:obj:`int`):
Input channels
out_channels (:obj:`int`):
Output channels
dropout (:obj:`float`, *optional*, defaults to 0.0):
Dropout rate
num_layers (:obj:`int`, *optional*, defaults to 1):
Number of attention blocks layers
attn_num_head_channels (:obj:`int`, *optional*, defaults to 1):
Number of attention heads of each spatial transformer block
add_downsample (:obj:`bool`, *optional*, defaults to `True`):
Whether to add downsampling layer before each final output
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
Parameters `dtype`
"""
in_channels: int
out_channels: int
dropout: float = 0.0
num_layers: int = 1
attn_num_head_channels: int = 1
add_downsample: bool = True
dtype: jnp.dtype = jnp.float32
def setup(self):
resnets = []
attentions = []
for i in range(self.num_layers):
in_channels = self.in_channels if i == 0 else self.out_channels
res_block = FlaxResnetBlock2D(
in_channels=in_channels,
out_channels=self.out_channels,
dropout_prob=self.dropout,
dtype=self.dtype,
)
resnets.append(res_block)
attn_block = FlaxSpatialTransformer(
in_channels=self.out_channels,
n_heads=self.attn_num_head_channels,
d_head=self.out_channels // self.attn_num_head_channels,
depth=1,
dtype=self.dtype,
)
attentions.append(attn_block)
self.resnets = resnets
self.attentions = attentions
if self.add_downsample:
self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)
def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
output_states = ()
for resnet, attn in zip(self.resnets, self.attentions):
hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
output_states += (hidden_states,)
if self.add_downsample:
hidden_states = self.downsamplers_0(hidden_states)
output_states += (hidden_states,)
return hidden_states, output_states
class FlaxDownBlock2D(nn.Module):
r"""
Flax 2D downsizing block
Parameters:
in_channels (:obj:`int`):
Input channels
out_channels (:obj:`int`):
Output channels
dropout (:obj:`float`, *optional*, defaults to 0.0):
Dropout rate
num_layers (:obj:`int`, *optional*, defaults to 1):
Number of attention blocks layers
add_downsample (:obj:`bool`, *optional*, defaults to `True`):
Whether to add downsampling layer before each final output
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
Parameters `dtype`
"""
in_channels: int
out_channels: int
dropout: float = 0.0
num_layers: int = 1
add_downsample: bool = True
dtype: jnp.dtype = jnp.float32
def setup(self):
resnets = []
for i in range(self.num_layers):
in_channels = self.in_channels if i == 0 else self.out_channels
res_block = FlaxResnetBlock2D(
in_channels=in_channels,
out_channels=self.out_channels,
dropout_prob=self.dropout,
dtype=self.dtype,
)
resnets.append(res_block)
self.resnets = resnets
if self.add_downsample:
self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)
def __call__(self, hidden_states, temb, deterministic=True):
output_states = ()
for resnet in self.resnets:
hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
output_states += (hidden_states,)
if self.add_downsample:
hidden_states = self.downsamplers_0(hidden_states)
output_states += (hidden_states,)
return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module):
r"""
Cross Attention 2D Upsampling block - original architecture from Unet transformers:
https://arxiv.org/abs/2103.06104
Parameters:
in_channels (:obj:`int`):
Input channels
out_channels (:obj:`int`):
            Output channels
        prev_output_channel (:obj:`int`):
            Output channels from the previous block
dropout (:obj:`float`, *optional*, defaults to 0.0):
Dropout rate
num_layers (:obj:`int`, *optional*, defaults to 1):
Number of attention blocks layers
attn_num_head_channels (:obj:`int`, *optional*, defaults to 1):
Number of attention heads of each spatial transformer block
add_upsample (:obj:`bool`, *optional*, defaults to `True`):
Whether to add upsampling layer before each final output
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
Parameters `dtype`
"""
in_channels: int
out_channels: int
prev_output_channel: int
dropout: float = 0.0
num_layers: int = 1
attn_num_head_channels: int = 1
add_upsample: bool = True
dtype: jnp.dtype = jnp.float32
def setup(self):
resnets = []
attentions = []
for i in range(self.num_layers):
res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
res_block = FlaxResnetBlock2D(
in_channels=resnet_in_channels + res_skip_channels,
out_channels=self.out_channels,
dropout_prob=self.dropout,
dtype=self.dtype,
)
resnets.append(res_block)
attn_block = FlaxSpatialTransformer(
in_channels=self.out_channels,
n_heads=self.attn_num_head_channels,
d_head=self.out_channels // self.attn_num_head_channels,
depth=1,
dtype=self.dtype,
)
attentions.append(attn_block)
self.resnets = resnets
self.attentions = attentions
if self.add_upsample:
self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)
def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
for resnet, attn in zip(self.resnets, self.attentions):
# pop res hidden states
res_hidden_states = res_hidden_states_tuple[-1]
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
if self.add_upsample:
hidden_states = self.upsamplers_0(hidden_states)
return hidden_states
class FlaxUpBlock2D(nn.Module):
r"""
Flax 2D upsampling block
Parameters:
in_channels (:obj:`int`):
Input channels
out_channels (:obj:`int`):
Output channels
prev_output_channel (:obj:`int`):
Output channels from the previous block
dropout (:obj:`float`, *optional*, defaults to 0.0):
Dropout rate
num_layers (:obj:`int`, *optional*, defaults to 1):
Number of attention blocks layers
        add_upsample (:obj:`bool`, *optional*, defaults to `True`):
            Whether to add upsampling layer before each final output
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
Parameters `dtype`
"""
in_channels: int
out_channels: int
prev_output_channel: int
dropout: float = 0.0
num_layers: int = 1
add_upsample: bool = True
dtype: jnp.dtype = jnp.float32
def setup(self):
resnets = []
for i in range(self.num_layers):
res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
res_block = FlaxResnetBlock2D(
in_channels=resnet_in_channels + res_skip_channels,
out_channels=self.out_channels,
dropout_prob=self.dropout,
dtype=self.dtype,
)
resnets.append(res_block)
self.resnets = resnets
if self.add_upsample:
self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)
def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
for resnet in self.resnets:
# pop res hidden states
res_hidden_states = res_hidden_states_tuple[-1]
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
if self.add_upsample:
hidden_states = self.upsamplers_0(hidden_states)
return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module):
r"""
Cross Attention 2D Mid-level block - original architecture from Unet transformers: https://arxiv.org/abs/2103.06104
Parameters:
in_channels (:obj:`int`):
Input channels
dropout (:obj:`float`, *optional*, defaults to 0.0):
Dropout rate
num_layers (:obj:`int`, *optional*, defaults to 1):
Number of attention blocks layers
attn_num_head_channels (:obj:`int`, *optional*, defaults to 1):
Number of attention heads of each spatial transformer block
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
Parameters `dtype`
"""
in_channels: int
dropout: float = 0.0
num_layers: int = 1
attn_num_head_channels: int = 1
dtype: jnp.dtype = jnp.float32
def setup(self):
# there is always at least one resnet
resnets = [
FlaxResnetBlock2D(
in_channels=self.in_channels,
out_channels=self.in_channels,
dropout_prob=self.dropout,
dtype=self.dtype,
)
]
attentions = []
for _ in range(self.num_layers):
attn_block = FlaxSpatialTransformer(
in_channels=self.in_channels,
n_heads=self.attn_num_head_channels,
d_head=self.in_channels // self.attn_num_head_channels,
depth=1,
dtype=self.dtype,
)
attentions.append(attn_block)
res_block = FlaxResnetBlock2D(
in_channels=self.in_channels,
out_channels=self.in_channels,
dropout_prob=self.dropout,
dtype=self.dtype,
)
resnets.append(res_block)
self.resnets = resnets
self.attentions = attentions
def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
hidden_states = self.resnets[0](hidden_states, temb)
for attn, resnet in zip(self.attentions, self.resnets[1:]):
hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
return hidden_states
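# --- Illustrative Flax sketch (not part of unet_blocks_flax.py) ---
# Blocks here use NHWC (channels-last) tensors; the sizes and the
# time-embedding width (128) below are arbitrary.
import jax

block = FlaxDownBlock2D(in_channels=32, out_channels=32, num_layers=1)
x = jnp.zeros((1, 16, 16, 32))
temb = jnp.zeros((1, 128))
params = block.init(jax.random.PRNGKey(0), x, temb)
hidden, output_states = block.apply(params, x, temb)
assert hidden.shape == (1, 8, 8, 32)  # downsampled once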
|
diffusers-main
|
src/diffusers/models/unet_blocks_flax.py
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import flax.linen as nn
import jax.numpy as jnp
class FlaxAttentionBlock(nn.Module):
r"""
A Flax multi-head attention module as described in: https://arxiv.org/abs/1706.03762
Parameters:
query_dim (:obj:`int`):
Input hidden states dimension
heads (:obj:`int`, *optional*, defaults to 8):
Number of heads
dim_head (:obj:`int`, *optional*, defaults to 64):
Hidden states dimension inside each head
dropout (:obj:`float`, *optional*, defaults to 0.0):
Dropout rate
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
Parameters `dtype`
"""
query_dim: int
heads: int = 8
dim_head: int = 64
dropout: float = 0.0
dtype: jnp.dtype = jnp.float32
def setup(self):
inner_dim = self.dim_head * self.heads
self.scale = self.dim_head**-0.5
# Weights were exported with old names {to_q, to_k, to_v, to_out}
self.query = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, name="to_q")
self.key = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, name="to_k")
self.value = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, name="to_v")
self.proj_attn = nn.Dense(self.query_dim, dtype=self.dtype, name="to_out_0")
def reshape_heads_to_batch_dim(self, tensor):
batch_size, seq_len, dim = tensor.shape
head_size = self.heads
tensor = tensor.reshape(batch_size, seq_len, head_size, dim // head_size)
tensor = jnp.transpose(tensor, (0, 2, 1, 3))
tensor = tensor.reshape(batch_size * head_size, seq_len, dim // head_size)
return tensor
def reshape_batch_dim_to_heads(self, tensor):
batch_size, seq_len, dim = tensor.shape
head_size = self.heads
tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim)
tensor = jnp.transpose(tensor, (0, 2, 1, 3))
tensor = tensor.reshape(batch_size // head_size, seq_len, dim * head_size)
return tensor
def __call__(self, hidden_states, context=None, deterministic=True):
context = hidden_states if context is None else context
query_proj = self.query(hidden_states)
key_proj = self.key(context)
value_proj = self.value(context)
query_states = self.reshape_heads_to_batch_dim(query_proj)
key_states = self.reshape_heads_to_batch_dim(key_proj)
value_states = self.reshape_heads_to_batch_dim(value_proj)
# compute attentions
attention_scores = jnp.einsum("b i d, b j d->b i j", query_states, key_states)
attention_scores = attention_scores * self.scale
attention_probs = nn.softmax(attention_scores, axis=2)
# attend to values
hidden_states = jnp.einsum("b i j, b j d -> b i d", attention_probs, value_states)
hidden_states = self.reshape_batch_dim_to_heads(hidden_states)
hidden_states = self.proj_attn(hidden_states)
return hidden_states
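# --- Illustrative Flax sketch (not part of attention_flax.py) ---
# Self-attention when `context` is None; shapes below are arbitrary.
import jax

attn = FlaxAttentionBlock(query_dim=64, heads=8, dim_head=8)
x = jnp.zeros((1, 16, 64))
params = attn.init(jax.random.PRNGKey(0), x)
assert attn.apply(params, x).shape == x.shape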
class FlaxBasicTransformerBlock(nn.Module):
r"""
A Flax transformer block layer with `GLU` (Gated Linear Unit) activation function as described in:
https://arxiv.org/abs/1706.03762
Parameters:
dim (:obj:`int`):
Inner hidden states dimension
n_heads (:obj:`int`):
Number of heads
d_head (:obj:`int`):
Hidden states dimension inside each head
dropout (:obj:`float`, *optional*, defaults to 0.0):
Dropout rate
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
Parameters `dtype`
"""
dim: int
n_heads: int
d_head: int
dropout: float = 0.0
dtype: jnp.dtype = jnp.float32
def setup(self):
# self attention
self.attn1 = FlaxAttentionBlock(self.dim, self.n_heads, self.d_head, self.dropout, dtype=self.dtype)
# cross attention
self.attn2 = FlaxAttentionBlock(self.dim, self.n_heads, self.d_head, self.dropout, dtype=self.dtype)
self.ff = FlaxGluFeedForward(dim=self.dim, dropout=self.dropout, dtype=self.dtype)
self.norm1 = nn.LayerNorm(epsilon=1e-5, dtype=self.dtype)
self.norm2 = nn.LayerNorm(epsilon=1e-5, dtype=self.dtype)
self.norm3 = nn.LayerNorm(epsilon=1e-5, dtype=self.dtype)
def __call__(self, hidden_states, context, deterministic=True):
# self attention
residual = hidden_states
hidden_states = self.attn1(self.norm1(hidden_states), deterministic=deterministic)
hidden_states = hidden_states + residual
# cross attention
residual = hidden_states
hidden_states = self.attn2(self.norm2(hidden_states), context, deterministic=deterministic)
hidden_states = hidden_states + residual
# feed forward
residual = hidden_states
hidden_states = self.ff(self.norm3(hidden_states), deterministic=deterministic)
hidden_states = hidden_states + residual
return hidden_states
class FlaxSpatialTransformer(nn.Module):
r"""
A Spatial Transformer layer with Gated Linear Unit (GLU) activation function as described in:
https://arxiv.org/pdf/1506.02025.pdf
Parameters:
in_channels (:obj:`int`):
Input number of channels
n_heads (:obj:`int`):
Number of heads
d_head (:obj:`int`):
Hidden states dimension inside each head
depth (:obj:`int`, *optional*, defaults to 1):
Number of transformers block
dropout (:obj:`float`, *optional*, defaults to 0.0):
Dropout rate
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
Parameters `dtype`
"""
in_channels: int
n_heads: int
d_head: int
depth: int = 1
dropout: float = 0.0
dtype: jnp.dtype = jnp.float32
def setup(self):
self.norm = nn.GroupNorm(num_groups=32, epsilon=1e-5)
inner_dim = self.n_heads * self.d_head
self.proj_in = nn.Conv(
inner_dim,
kernel_size=(1, 1),
strides=(1, 1),
padding="VALID",
dtype=self.dtype,
)
self.transformer_blocks = [
FlaxBasicTransformerBlock(inner_dim, self.n_heads, self.d_head, dropout=self.dropout, dtype=self.dtype)
for _ in range(self.depth)
]
self.proj_out = nn.Conv(
inner_dim,
kernel_size=(1, 1),
strides=(1, 1),
padding="VALID",
dtype=self.dtype,
)
def __call__(self, hidden_states, context, deterministic=True):
batch, height, width, channels = hidden_states.shape
residual = hidden_states
hidden_states = self.norm(hidden_states)
hidden_states = self.proj_in(hidden_states)
hidden_states = hidden_states.reshape(batch, height * width, channels)
for transformer_block in self.transformer_blocks:
hidden_states = transformer_block(hidden_states, context, deterministic=deterministic)
hidden_states = hidden_states.reshape(batch, height, width, channels)
hidden_states = self.proj_out(hidden_states)
hidden_states = hidden_states + residual
return hidden_states
class FlaxGluFeedForward(nn.Module):
r"""
Flax module that encapsulates two Linear layers separated by a gated linear unit activation from:
https://arxiv.org/abs/2002.05202
Parameters:
dim (:obj:`int`):
Inner hidden states dimension
dropout (:obj:`float`, *optional*, defaults to 0.0):
Dropout rate
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
Parameters `dtype`
"""
dim: int
dropout: float = 0.0
dtype: jnp.dtype = jnp.float32
def setup(self):
# The second linear layer needs to be called
# net_2 for now to match the index of the Sequential layer
self.net_0 = FlaxGEGLU(self.dim, self.dropout, self.dtype)
self.net_2 = nn.Dense(self.dim, dtype=self.dtype)
def __call__(self, hidden_states, deterministic=True):
hidden_states = self.net_0(hidden_states)
hidden_states = self.net_2(hidden_states)
return hidden_states
class FlaxGEGLU(nn.Module):
r"""
Flax implementation of a Linear layer followed by the variant of the gated linear unit activation function from
https://arxiv.org/abs/2002.05202.
Parameters:
dim (:obj:`int`):
Input hidden states dimension
dropout (:obj:`float`, *optional*, defaults to 0.0):
Dropout rate
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
Parameters `dtype`
"""
dim: int
dropout: float = 0.0
dtype: jnp.dtype = jnp.float32
def setup(self):
inner_dim = self.dim * 4
self.proj = nn.Dense(inner_dim * 2, dtype=self.dtype)
def __call__(self, hidden_states, deterministic=True):
hidden_states = self.proj(hidden_states)
hidden_linear, hidden_gelu = jnp.split(hidden_states, 2, axis=2)
return hidden_linear * nn.gelu(hidden_gelu)
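# --- Illustrative Flax sketch (not part of attention_flax.py) ---
# FlaxGEGLU projects to 2 * (4 * dim) features, splits the result in half
# along the feature axis, and gates one half with GELU, so the output width
# is 4 * dim; sizes below are arbitrary.
import jax

geglu = FlaxGEGLU(dim=16)
x = jnp.zeros((1, 10, 16))
params = geglu.init(jax.random.PRNGKey(0), x)
assert geglu.apply(params, x).shape == (1, 10, 64)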
|
diffusers-main
|
src/diffusers/models/attention_flax.py
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .unet_2d import UNet2DModel
from .unet_2d_condition import UNet2DConditionModel
from .vae import AutoencoderKL, VQModel
if is_flax_available():
from .unet_2d_condition_flax import FlaxUNet2DConditionModel
from .vae_flax import FlaxAutoencoderKL
|
diffusers-main
|
src/diffusers/models/__init__.py
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
out_channels: int
dtype: jnp.dtype = jnp.float32
def setup(self):
self.conv = nn.Conv(
self.out_channels,
kernel_size=(3, 3),
strides=(1, 1),
padding=((1, 1), (1, 1)),
dtype=self.dtype,
)
def __call__(self, hidden_states):
batch, height, width, channels = hidden_states.shape
hidden_states = jax.image.resize(
hidden_states,
shape=(batch, height * 2, width * 2, channels),
method="nearest",
)
hidden_states = self.conv(hidden_states)
return hidden_states
class FlaxDownsample2D(nn.Module):
out_channels: int
dtype: jnp.dtype = jnp.float32
def setup(self):
self.conv = nn.Conv(
self.out_channels,
kernel_size=(3, 3),
strides=(2, 2),
padding=((1, 1), (1, 1)), # padding="VALID",
dtype=self.dtype,
)
def __call__(self, hidden_states):
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
hidden_states = self.conv(hidden_states)
return hidden_states
class FlaxResnetBlock2D(nn.Module):
in_channels: int
out_channels: int = None
dropout_prob: float = 0.0
use_nin_shortcut: bool = None
dtype: jnp.dtype = jnp.float32
def setup(self):
out_channels = self.in_channels if self.out_channels is None else self.out_channels
self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
self.conv1 = nn.Conv(
out_channels,
kernel_size=(3, 3),
strides=(1, 1),
padding=((1, 1), (1, 1)),
dtype=self.dtype,
)
self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)
self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
self.dropout = nn.Dropout(self.dropout_prob)
self.conv2 = nn.Conv(
out_channels,
kernel_size=(3, 3),
strides=(1, 1),
padding=((1, 1), (1, 1)),
dtype=self.dtype,
)
use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
self.conv_shortcut = None
if use_nin_shortcut:
self.conv_shortcut = nn.Conv(
out_channels,
kernel_size=(1, 1),
strides=(1, 1),
padding="VALID",
dtype=self.dtype,
)
def __call__(self, hidden_states, temb, deterministic=True):
residual = hidden_states
hidden_states = self.norm1(hidden_states)
hidden_states = nn.swish(hidden_states)
hidden_states = self.conv1(hidden_states)
temb = self.time_emb_proj(nn.swish(temb))
temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
hidden_states = hidden_states + temb
hidden_states = self.norm2(hidden_states)
hidden_states = nn.swish(hidden_states)
hidden_states = self.dropout(hidden_states, deterministic)
hidden_states = self.conv2(hidden_states)
if self.conv_shortcut is not None:
residual = self.conv_shortcut(residual)
return hidden_states + residual
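# --- Illustrative Flax sketch (not part of resnet_flax.py) ---
# FlaxDownsample2D halves H and W with a strided conv; FlaxUpsample2D doubles
# them with a nearest-neighbor resize followed by a conv. Sizes are arbitrary.
x = jnp.zeros((1, 16, 16, 8))
down = FlaxDownsample2D(out_channels=8)
p = down.init(jax.random.PRNGKey(0), x)
assert down.apply(p, x).shape == (1, 8, 8, 8)
up = FlaxUpsample2D(out_channels=8)
p = up.init(jax.random.PRNGKey(0), x)
assert up.apply(p, x).shape == (1, 32, 32, 8)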
|
diffusers-main
|
src/diffusers/models/resnet_flax.py
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from torch import nn
from .attention import AttentionBlock, SpatialTransformer
from .resnet import Downsample2D, FirDownsample2D, FirUpsample2D, ResnetBlock2D, Upsample2D
def get_down_block(
down_block_type,
num_layers,
in_channels,
out_channels,
temb_channels,
add_downsample,
resnet_eps,
resnet_act_fn,
attn_num_head_channels,
resnet_groups=None,
cross_attention_dim=None,
downsample_padding=None,
):
down_block_type = down_block_type[7:] if down_block_type.startswith("UNetRes") else down_block_type
if down_block_type == "DownBlock2D":
return DownBlock2D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
add_downsample=add_downsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
downsample_padding=downsample_padding,
)
elif down_block_type == "AttnDownBlock2D":
return AttnDownBlock2D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
add_downsample=add_downsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
downsample_padding=downsample_padding,
attn_num_head_channels=attn_num_head_channels,
)
elif down_block_type == "CrossAttnDownBlock2D":
if cross_attention_dim is None:
raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock2D")
return CrossAttnDownBlock2D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
add_downsample=add_downsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
downsample_padding=downsample_padding,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attn_num_head_channels,
)
elif down_block_type == "SkipDownBlock2D":
return SkipDownBlock2D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
add_downsample=add_downsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
downsample_padding=downsample_padding,
)
elif down_block_type == "AttnSkipDownBlock2D":
return AttnSkipDownBlock2D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
add_downsample=add_downsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
downsample_padding=downsample_padding,
attn_num_head_channels=attn_num_head_channels,
)
elif down_block_type == "DownEncoderBlock2D":
return DownEncoderBlock2D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
add_downsample=add_downsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
downsample_padding=downsample_padding,
        )
    raise ValueError(f"{down_block_type} does not exist.")
def get_up_block(
up_block_type,
num_layers,
in_channels,
out_channels,
prev_output_channel,
temb_channels,
add_upsample,
resnet_eps,
resnet_act_fn,
attn_num_head_channels,
resnet_groups=None,
cross_attention_dim=None,
):
up_block_type = up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type
if up_block_type == "UpBlock2D":
return UpBlock2D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
prev_output_channel=prev_output_channel,
temb_channels=temb_channels,
add_upsample=add_upsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
)
elif up_block_type == "CrossAttnUpBlock2D":
if cross_attention_dim is None:
raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock2D")
return CrossAttnUpBlock2D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
prev_output_channel=prev_output_channel,
temb_channels=temb_channels,
add_upsample=add_upsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attn_num_head_channels,
)
elif up_block_type == "AttnUpBlock2D":
return AttnUpBlock2D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
prev_output_channel=prev_output_channel,
temb_channels=temb_channels,
add_upsample=add_upsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
attn_num_head_channels=attn_num_head_channels,
)
elif up_block_type == "SkipUpBlock2D":
return SkipUpBlock2D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
prev_output_channel=prev_output_channel,
temb_channels=temb_channels,
add_upsample=add_upsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
)
elif up_block_type == "AttnSkipUpBlock2D":
return AttnSkipUpBlock2D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
prev_output_channel=prev_output_channel,
temb_channels=temb_channels,
add_upsample=add_upsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
attn_num_head_channels=attn_num_head_channels,
)
elif up_block_type == "UpDecoderBlock2D":
return UpDecoderBlock2D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
add_upsample=add_upsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
)
raise ValueError(f"{up_block_type} does not exist.")
class UNetMidBlock2D(nn.Module):
def __init__(
self,
in_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
attn_num_head_channels=1,
attention_type="default",
output_scale_factor=1.0,
**kwargs,
):
super().__init__()
self.attention_type = attention_type
resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)
# there is always at least one resnet
resnets = [
ResnetBlock2D(
in_channels=in_channels,
out_channels=in_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
]
attentions = []
for _ in range(num_layers):
attentions.append(
AttentionBlock(
in_channels,
num_head_channels=attn_num_head_channels,
rescale_output_factor=output_scale_factor,
eps=resnet_eps,
num_groups=resnet_groups,
)
)
resnets.append(
ResnetBlock2D(
in_channels=in_channels,
out_channels=in_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
self.attentions = nn.ModuleList(attentions)
self.resnets = nn.ModuleList(resnets)
def forward(self, hidden_states, temb=None, encoder_states=None):
hidden_states = self.resnets[0](hidden_states, temb)
for attn, resnet in zip(self.attentions, self.resnets[1:]):
if self.attention_type == "default":
hidden_states = attn(hidden_states)
else:
hidden_states = attn(hidden_states, encoder_states)
hidden_states = resnet(hidden_states, temb)
return hidden_states
class UNetMidBlock2DCrossAttn(nn.Module):
def __init__(
self,
in_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
attn_num_head_channels=1,
attention_type="default",
output_scale_factor=1.0,
cross_attention_dim=1280,
**kwargs,
):
super().__init__()
self.attention_type = attention_type
self.attn_num_head_channels = attn_num_head_channels
resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)
# there is always at least one resnet
resnets = [
ResnetBlock2D(
in_channels=in_channels,
out_channels=in_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
]
attentions = []
for _ in range(num_layers):
attentions.append(
SpatialTransformer(
in_channels,
attn_num_head_channels,
in_channels // attn_num_head_channels,
depth=1,
context_dim=cross_attention_dim,
num_groups=resnet_groups,
)
)
resnets.append(
ResnetBlock2D(
in_channels=in_channels,
out_channels=in_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
self.attentions = nn.ModuleList(attentions)
self.resnets = nn.ModuleList(resnets)
def set_attention_slice(self, slice_size):
if slice_size is not None and self.attn_num_head_channels % slice_size != 0:
raise ValueError(
f"Make sure slice_size {slice_size} is a divisor of "
f"the number of heads used in cross_attention {self.attn_num_head_channels}"
)
if slice_size is not None and slice_size > self.attn_num_head_channels:
raise ValueError(
f"Chunk_size {slice_size} has to be smaller or equal to "
f"the number of heads used in cross_attention {self.attn_num_head_channels}"
)
for attn in self.attentions:
attn._set_attention_slice(slice_size)
def forward(self, hidden_states, temb=None, encoder_hidden_states=None):
hidden_states = self.resnets[0](hidden_states, temb)
for attn, resnet in zip(self.attentions, self.resnets[1:]):
hidden_states = attn(hidden_states, encoder_hidden_states)
hidden_states = resnet(hidden_states, temb)
return hidden_states
class AttnDownBlock2D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
attn_num_head_channels=1,
attention_type="default",
output_scale_factor=1.0,
downsample_padding=1,
add_downsample=True,
):
super().__init__()
resnets = []
attentions = []
self.attention_type = attention_type
for i in range(num_layers):
in_channels = in_channels if i == 0 else out_channels
resnets.append(
ResnetBlock2D(
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
attentions.append(
AttentionBlock(
out_channels,
num_head_channels=attn_num_head_channels,
rescale_output_factor=output_scale_factor,
eps=resnet_eps,
num_groups=resnet_groups,
)
)
self.attentions = nn.ModuleList(attentions)
self.resnets = nn.ModuleList(resnets)
if add_downsample:
self.downsamplers = nn.ModuleList(
[
Downsample2D(
                        out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
)
]
)
else:
self.downsamplers = None
def forward(self, hidden_states, temb=None):
output_states = ()
for resnet, attn in zip(self.resnets, self.attentions):
hidden_states = resnet(hidden_states, temb)
hidden_states = attn(hidden_states)
output_states += (hidden_states,)
if self.downsamplers is not None:
for downsampler in self.downsamplers:
hidden_states = downsampler(hidden_states)
output_states += (hidden_states,)
return hidden_states, output_states
class CrossAttnDownBlock2D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
attn_num_head_channels=1,
cross_attention_dim=1280,
attention_type="default",
output_scale_factor=1.0,
downsample_padding=1,
add_downsample=True,
):
super().__init__()
resnets = []
attentions = []
self.attention_type = attention_type
self.attn_num_head_channels = attn_num_head_channels
for i in range(num_layers):
in_channels = in_channels if i == 0 else out_channels
resnets.append(
ResnetBlock2D(
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
attentions.append(
SpatialTransformer(
out_channels,
attn_num_head_channels,
out_channels // attn_num_head_channels,
depth=1,
context_dim=cross_attention_dim,
num_groups=resnet_groups,
)
)
self.attentions = nn.ModuleList(attentions)
self.resnets = nn.ModuleList(resnets)
if add_downsample:
self.downsamplers = nn.ModuleList(
[
Downsample2D(
                        out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
)
]
)
else:
self.downsamplers = None
self.gradient_checkpointing = False
def set_attention_slice(self, slice_size):
if slice_size is not None and self.attn_num_head_channels % slice_size != 0:
raise ValueError(
f"Make sure slice_size {slice_size} is a divisor of "
f"the number of heads used in cross_attention {self.attn_num_head_channels}"
)
if slice_size is not None and slice_size > self.attn_num_head_channels:
raise ValueError(
f"Chunk_size {slice_size} has to be smaller or equal to "
f"the number of heads used in cross_attention {self.attn_num_head_channels}"
)
for attn in self.attentions:
attn._set_attention_slice(slice_size)
def forward(self, hidden_states, temb=None, encoder_hidden_states=None):
output_states = ()
for resnet, attn in zip(self.resnets, self.attentions):
if self.training and self.gradient_checkpointing:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs)
return custom_forward
hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(attn), hidden_states, encoder_hidden_states
)
else:
hidden_states = resnet(hidden_states, temb)
hidden_states = attn(hidden_states, context=encoder_hidden_states)
output_states += (hidden_states,)
if self.downsamplers is not None:
for downsampler in self.downsamplers:
hidden_states = downsampler(hidden_states)
output_states += (hidden_states,)
return hidden_states, output_states
class DownBlock2D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
output_scale_factor=1.0,
add_downsample=True,
downsample_padding=1,
):
super().__init__()
resnets = []
for i in range(num_layers):
in_channels = in_channels if i == 0 else out_channels
resnets.append(
ResnetBlock2D(
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
self.resnets = nn.ModuleList(resnets)
if add_downsample:
self.downsamplers = nn.ModuleList(
[
Downsample2D(
                        out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
)
]
)
else:
self.downsamplers = None
self.gradient_checkpointing = False
def forward(self, hidden_states, temb=None):
output_states = ()
for resnet in self.resnets:
if self.training and self.gradient_checkpointing:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs)
return custom_forward
hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
else:
hidden_states = resnet(hidden_states, temb)
output_states += (hidden_states,)
if self.downsamplers is not None:
for downsampler in self.downsamplers:
hidden_states = downsampler(hidden_states)
output_states += (hidden_states,)
return hidden_states, output_states
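def _demo_down_block_forward():
    # Illustrative sketch with assumed shapes: the forward pass returns the
    # final hidden states plus a tuple holding one tensor per resnet and one
    # for the downsampler; the matching up block consumes them as skips.
    block = DownBlock2D(in_channels=32, out_channels=64, temb_channels=128, num_layers=2)
    hidden_states = torch.randn(1, 32, 16, 16)
    temb = torch.randn(1, 128)
    hidden_states, output_states = block(hidden_states, temb)
    return hidden_states, output_states  # (1, 64, 8, 8) and three skip tensors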
class DownEncoderBlock2D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
output_scale_factor=1.0,
add_downsample=True,
downsample_padding=1,
):
super().__init__()
resnets = []
for i in range(num_layers):
in_channels = in_channels if i == 0 else out_channels
resnets.append(
ResnetBlock2D(
in_channels=in_channels,
out_channels=out_channels,
temb_channels=None,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
self.resnets = nn.ModuleList(resnets)
if add_downsample:
self.downsamplers = nn.ModuleList(
[
Downsample2D(
                        out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
)
]
)
else:
self.downsamplers = None
def forward(self, hidden_states):
for resnet in self.resnets:
hidden_states = resnet(hidden_states, temb=None)
if self.downsamplers is not None:
for downsampler in self.downsamplers:
hidden_states = downsampler(hidden_states)
return hidden_states
class AttnDownEncoderBlock2D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
attn_num_head_channels=1,
output_scale_factor=1.0,
add_downsample=True,
downsample_padding=1,
):
super().__init__()
resnets = []
attentions = []
for i in range(num_layers):
in_channels = in_channels if i == 0 else out_channels
resnets.append(
ResnetBlock2D(
in_channels=in_channels,
out_channels=out_channels,
temb_channels=None,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
attentions.append(
AttentionBlock(
out_channels,
num_head_channels=attn_num_head_channels,
rescale_output_factor=output_scale_factor,
eps=resnet_eps,
num_groups=resnet_groups,
)
)
self.attentions = nn.ModuleList(attentions)
self.resnets = nn.ModuleList(resnets)
if add_downsample:
self.downsamplers = nn.ModuleList(
[
Downsample2D(
                        out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
)
]
)
else:
self.downsamplers = None
def forward(self, hidden_states):
for resnet, attn in zip(self.resnets, self.attentions):
hidden_states = resnet(hidden_states, temb=None)
hidden_states = attn(hidden_states)
if self.downsamplers is not None:
for downsampler in self.downsamplers:
hidden_states = downsampler(hidden_states)
return hidden_states
class AttnSkipDownBlock2D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_pre_norm: bool = True,
attn_num_head_channels=1,
attention_type="default",
output_scale_factor=np.sqrt(2.0),
downsample_padding=1,
add_downsample=True,
):
super().__init__()
self.attentions = nn.ModuleList([])
self.resnets = nn.ModuleList([])
self.attention_type = attention_type
for i in range(num_layers):
in_channels = in_channels if i == 0 else out_channels
self.resnets.append(
ResnetBlock2D(
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=min(in_channels // 4, 32),
groups_out=min(out_channels // 4, 32),
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
self.attentions.append(
AttentionBlock(
out_channels,
num_head_channels=attn_num_head_channels,
rescale_output_factor=output_scale_factor,
eps=resnet_eps,
)
)
if add_downsample:
self.resnet_down = ResnetBlock2D(
in_channels=out_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=min(out_channels // 4, 32),
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
use_in_shortcut=True,
down=True,
kernel="fir",
)
self.downsamplers = nn.ModuleList([FirDownsample2D(in_channels, out_channels=out_channels)])
self.skip_conv = nn.Conv2d(3, out_channels, kernel_size=(1, 1), stride=(1, 1))
else:
self.resnet_down = None
self.downsamplers = None
self.skip_conv = None
def forward(self, hidden_states, temb=None, skip_sample=None):
output_states = ()
for resnet, attn in zip(self.resnets, self.attentions):
hidden_states = resnet(hidden_states, temb)
hidden_states = attn(hidden_states)
output_states += (hidden_states,)
if self.downsamplers is not None:
hidden_states = self.resnet_down(hidden_states, temb)
for downsampler in self.downsamplers:
skip_sample = downsampler(skip_sample)
hidden_states = self.skip_conv(skip_sample) + hidden_states
output_states += (hidden_states,)
return hidden_states, output_states, skip_sample
class SkipDownBlock2D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_pre_norm: bool = True,
output_scale_factor=np.sqrt(2.0),
add_downsample=True,
downsample_padding=1,
):
super().__init__()
self.resnets = nn.ModuleList([])
for i in range(num_layers):
in_channels = in_channels if i == 0 else out_channels
self.resnets.append(
ResnetBlock2D(
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=min(in_channels // 4, 32),
groups_out=min(out_channels // 4, 32),
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
if add_downsample:
self.resnet_down = ResnetBlock2D(
in_channels=out_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=min(out_channels // 4, 32),
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
use_in_shortcut=True,
down=True,
kernel="fir",
)
self.downsamplers = nn.ModuleList([FirDownsample2D(in_channels, out_channels=out_channels)])
self.skip_conv = nn.Conv2d(3, out_channels, kernel_size=(1, 1), stride=(1, 1))
else:
self.resnet_down = None
self.downsamplers = None
self.skip_conv = None
def forward(self, hidden_states, temb=None, skip_sample=None):
output_states = ()
for resnet in self.resnets:
hidden_states = resnet(hidden_states, temb)
output_states += (hidden_states,)
if self.downsamplers is not None:
hidden_states = self.resnet_down(hidden_states, temb)
for downsampler in self.downsamplers:
skip_sample = downsampler(skip_sample)
hidden_states = self.skip_conv(skip_sample) + hidden_states
output_states += (hidden_states,)
return hidden_states, output_states, skip_sample
class AttnUpBlock2D(nn.Module):
def __init__(
self,
in_channels: int,
prev_output_channel: int,
out_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
attention_type="default",
attn_num_head_channels=1,
output_scale_factor=1.0,
add_upsample=True,
):
super().__init__()
resnets = []
attentions = []
self.attention_type = attention_type
for i in range(num_layers):
res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
resnet_in_channels = prev_output_channel if i == 0 else out_channels
resnets.append(
ResnetBlock2D(
in_channels=resnet_in_channels + res_skip_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
attentions.append(
AttentionBlock(
out_channels,
num_head_channels=attn_num_head_channels,
rescale_output_factor=output_scale_factor,
eps=resnet_eps,
num_groups=resnet_groups,
)
)
self.attentions = nn.ModuleList(attentions)
self.resnets = nn.ModuleList(resnets)
if add_upsample:
self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
else:
self.upsamplers = None
def forward(self, hidden_states, res_hidden_states_tuple, temb=None):
for resnet, attn in zip(self.resnets, self.attentions):
# pop res hidden states
res_hidden_states = res_hidden_states_tuple[-1]
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
hidden_states = resnet(hidden_states, temb)
hidden_states = attn(hidden_states)
if self.upsamplers is not None:
for upsampler in self.upsamplers:
hidden_states = upsampler(hidden_states)
return hidden_states
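def _demo_attn_up_block():
    # Illustrative sketch with assumed shapes: skip tensors are popped from the
    # right of the tuple and concatenated on the channel dim before each
    # resnet, with self-attention after every resnet.
    block = AttnUpBlock2D(
        in_channels=32, prev_output_channel=64, out_channels=64, temb_channels=128, num_layers=2
    )
    hidden_states = torch.randn(1, 64, 8, 8)
    temb = torch.randn(1, 128)
    res_states = (torch.randn(1, 32, 8, 8), torch.randn(1, 64, 8, 8))
    return block(hidden_states, res_states, temb)  # upsampled to (1, 64, 16, 16)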
class CrossAttnUpBlock2D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
prev_output_channel: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
attn_num_head_channels=1,
cross_attention_dim=1280,
attention_type="default",
output_scale_factor=1.0,
downsample_padding=1,
add_upsample=True,
):
super().__init__()
resnets = []
attentions = []
self.attention_type = attention_type
self.attn_num_head_channels = attn_num_head_channels
for i in range(num_layers):
res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
resnet_in_channels = prev_output_channel if i == 0 else out_channels
resnets.append(
ResnetBlock2D(
in_channels=resnet_in_channels + res_skip_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
attentions.append(
SpatialTransformer(
out_channels,
attn_num_head_channels,
out_channels // attn_num_head_channels,
depth=1,
context_dim=cross_attention_dim,
num_groups=resnet_groups,
)
)
self.attentions = nn.ModuleList(attentions)
self.resnets = nn.ModuleList(resnets)
if add_upsample:
self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
else:
self.upsamplers = None
self.gradient_checkpointing = False
def set_attention_slice(self, slice_size):
if slice_size is not None and self.attn_num_head_channels % slice_size != 0:
raise ValueError(
f"Make sure slice_size {slice_size} is a divisor of "
f"the number of heads used in cross_attention {self.attn_num_head_channels}"
)
if slice_size is not None and slice_size > self.attn_num_head_channels:
raise ValueError(
f"Chunk_size {slice_size} has to be smaller or equal to "
f"the number of heads used in cross_attention {self.attn_num_head_channels}"
)
        for attn in self.attentions:
            attn._set_attention_slice(slice_size)
def forward(
self,
hidden_states,
res_hidden_states_tuple,
temb=None,
encoder_hidden_states=None,
upsample_size=None,
):
for resnet, attn in zip(self.resnets, self.attentions):
# pop res hidden states
res_hidden_states = res_hidden_states_tuple[-1]
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
if self.training and self.gradient_checkpointing:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs)
return custom_forward
hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(attn), hidden_states, encoder_hidden_states
)
else:
hidden_states = resnet(hidden_states, temb)
hidden_states = attn(hidden_states, context=encoder_hidden_states)
if self.upsamplers is not None:
for upsampler in self.upsamplers:
hidden_states = upsampler(hidden_states, upsample_size)
return hidden_states
class UpBlock2D(nn.Module):
def __init__(
self,
in_channels: int,
prev_output_channel: int,
out_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
output_scale_factor=1.0,
add_upsample=True,
):
super().__init__()
resnets = []
for i in range(num_layers):
res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
resnet_in_channels = prev_output_channel if i == 0 else out_channels
resnets.append(
ResnetBlock2D(
in_channels=resnet_in_channels + res_skip_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
self.resnets = nn.ModuleList(resnets)
if add_upsample:
self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
else:
self.upsamplers = None
self.gradient_checkpointing = False
def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None):
for resnet in self.resnets:
# pop res hidden states
res_hidden_states = res_hidden_states_tuple[-1]
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
if self.training and self.gradient_checkpointing:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs)
return custom_forward
hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
else:
hidden_states = resnet(hidden_states, temb)
if self.upsamplers is not None:
for upsampler in self.upsamplers:
hidden_states = upsampler(hidden_states, upsample_size)
return hidden_states
class UpDecoderBlock2D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
output_scale_factor=1.0,
add_upsample=True,
):
super().__init__()
resnets = []
for i in range(num_layers):
input_channels = in_channels if i == 0 else out_channels
resnets.append(
ResnetBlock2D(
in_channels=input_channels,
out_channels=out_channels,
temb_channels=None,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
self.resnets = nn.ModuleList(resnets)
if add_upsample:
self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
else:
self.upsamplers = None
def forward(self, hidden_states):
for resnet in self.resnets:
hidden_states = resnet(hidden_states, temb=None)
if self.upsamplers is not None:
for upsampler in self.upsamplers:
hidden_states = upsampler(hidden_states)
return hidden_states
class AttnUpDecoderBlock2D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
attn_num_head_channels=1,
output_scale_factor=1.0,
add_upsample=True,
):
super().__init__()
resnets = []
attentions = []
for i in range(num_layers):
input_channels = in_channels if i == 0 else out_channels
resnets.append(
ResnetBlock2D(
in_channels=input_channels,
out_channels=out_channels,
temb_channels=None,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
attentions.append(
AttentionBlock(
out_channels,
num_head_channels=attn_num_head_channels,
rescale_output_factor=output_scale_factor,
eps=resnet_eps,
num_groups=resnet_groups,
)
)
self.attentions = nn.ModuleList(attentions)
self.resnets = nn.ModuleList(resnets)
if add_upsample:
self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
else:
self.upsamplers = None
def forward(self, hidden_states):
for resnet, attn in zip(self.resnets, self.attentions):
hidden_states = resnet(hidden_states, temb=None)
hidden_states = attn(hidden_states)
if self.upsamplers is not None:
for upsampler in self.upsamplers:
hidden_states = upsampler(hidden_states)
return hidden_states
class AttnSkipUpBlock2D(nn.Module):
def __init__(
self,
in_channels: int,
prev_output_channel: int,
out_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_pre_norm: bool = True,
attn_num_head_channels=1,
attention_type="default",
output_scale_factor=np.sqrt(2.0),
upsample_padding=1,
add_upsample=True,
):
super().__init__()
self.attentions = nn.ModuleList([])
self.resnets = nn.ModuleList([])
self.attention_type = attention_type
for i in range(num_layers):
res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
resnet_in_channels = prev_output_channel if i == 0 else out_channels
self.resnets.append(
ResnetBlock2D(
in_channels=resnet_in_channels + res_skip_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
                    groups=min((resnet_in_channels + res_skip_channels) // 4, 32),
groups_out=min(out_channels // 4, 32),
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
self.attentions.append(
AttentionBlock(
out_channels,
num_head_channels=attn_num_head_channels,
rescale_output_factor=output_scale_factor,
eps=resnet_eps,
)
)
self.upsampler = FirUpsample2D(in_channels, out_channels=out_channels)
if add_upsample:
self.resnet_up = ResnetBlock2D(
in_channels=out_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=min(out_channels // 4, 32),
groups_out=min(out_channels // 4, 32),
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
use_in_shortcut=True,
up=True,
kernel="fir",
)
self.skip_conv = nn.Conv2d(out_channels, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self.skip_norm = torch.nn.GroupNorm(
num_groups=min(out_channels // 4, 32), num_channels=out_channels, eps=resnet_eps, affine=True
)
self.act = nn.SiLU()
else:
self.resnet_up = None
self.skip_conv = None
self.skip_norm = None
self.act = None
def forward(self, hidden_states, res_hidden_states_tuple, temb=None, skip_sample=None):
for resnet in self.resnets:
# pop res hidden states
res_hidden_states = res_hidden_states_tuple[-1]
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
hidden_states = resnet(hidden_states, temb)
hidden_states = self.attentions[0](hidden_states)
if skip_sample is not None:
skip_sample = self.upsampler(skip_sample)
else:
skip_sample = 0
if self.resnet_up is not None:
skip_sample_states = self.skip_norm(hidden_states)
skip_sample_states = self.act(skip_sample_states)
skip_sample_states = self.skip_conv(skip_sample_states)
skip_sample = skip_sample + skip_sample_states
hidden_states = self.resnet_up(hidden_states, temb)
return hidden_states, skip_sample
class SkipUpBlock2D(nn.Module):
def __init__(
self,
in_channels: int,
prev_output_channel: int,
out_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_pre_norm: bool = True,
output_scale_factor=np.sqrt(2.0),
add_upsample=True,
upsample_padding=1,
):
super().__init__()
self.resnets = nn.ModuleList([])
for i in range(num_layers):
res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
resnet_in_channels = prev_output_channel if i == 0 else out_channels
self.resnets.append(
ResnetBlock2D(
in_channels=resnet_in_channels + res_skip_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=min((resnet_in_channels + res_skip_channels) // 4, 32),
groups_out=min(out_channels // 4, 32),
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
self.upsampler = FirUpsample2D(in_channels, out_channels=out_channels)
if add_upsample:
self.resnet_up = ResnetBlock2D(
in_channels=out_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=min(out_channels // 4, 32),
groups_out=min(out_channels // 4, 32),
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
use_in_shortcut=True,
up=True,
kernel="fir",
)
self.skip_conv = nn.Conv2d(out_channels, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self.skip_norm = torch.nn.GroupNorm(
num_groups=min(out_channels // 4, 32), num_channels=out_channels, eps=resnet_eps, affine=True
)
self.act = nn.SiLU()
else:
self.resnet_up = None
self.skip_conv = None
self.skip_norm = None
self.act = None
def forward(self, hidden_states, res_hidden_states_tuple, temb=None, skip_sample=None):
for resnet in self.resnets:
# pop res hidden states
res_hidden_states = res_hidden_states_tuple[-1]
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
hidden_states = resnet(hidden_states, temb)
if skip_sample is not None:
skip_sample = self.upsampler(skip_sample)
else:
skip_sample = 0
if self.resnet_up is not None:
skip_sample_states = self.skip_norm(hidden_states)
skip_sample_states = self.act(skip_sample_states)
skip_sample_states = self.skip_conv(skip_sample_states)
skip_sample = skip_sample + skip_sample_states
hidden_states = self.resnet_up(hidden_states, temb)
return hidden_states, skip_sample
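def _demo_skip_down_block():
    # Illustrative sketch with assumed shapes: the Skip blocks thread an
    # image-resolution `skip_sample` alongside the feature maps; it is
    # FIR-downsampled, projected from 3 channels, and added to the features.
    block = SkipDownBlock2D(in_channels=32, out_channels=64, temb_channels=128, num_layers=1)
    hidden_states = torch.randn(1, 32, 16, 16)
    temb = torch.randn(1, 128)
    skip_sample = torch.randn(1, 3, 16, 16)
    hidden_states, output_states, skip_sample = block(hidden_states, temb, skip_sample)
    return hidden_states, output_states, skip_sample  # features and skip now at 8x8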
|
diffusers-main
|
src/diffusers/models/unet_blocks.py
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..modeling_flax_utils import FlaxModelMixin
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .unet_blocks_flax import (
FlaxCrossAttnDownBlock2D,
FlaxCrossAttnUpBlock2D,
FlaxDownBlock2D,
FlaxUNetMidBlock2DCrossAttn,
FlaxUpBlock2D,
)
@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
"""
Args:
sample (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)`):
Hidden states conditioned on `encoder_hidden_states` input. Output of last layer of model.
"""
sample: jnp.ndarray
@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
r"""
FlaxUNet2DConditionModel is a conditional 2D UNet model that takes in a noisy sample, conditional state, and a
timestep and returns sample shaped output.
This model inherits from [`FlaxModelMixin`]. Check the superclass documentation for the generic methods the library
implements for all the models (such as downloading or saving, etc.)
Also, this model is a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module)
subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to
general usage and behavior.
Finally, this model supports inherent JAX features such as:
- [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
- [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
- [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
- [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
Parameters:
        sample_size (`int`, *optional*, defaults to 32):
            The size of the input sample.
in_channels (`int`, *optional*, defaults to 4):
The number of channels in the input sample.
out_channels (`int`, *optional*, defaults to 4):
The number of channels in the output.
down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
The tuple of downsample blocks to use. The corresponding class names will be: "FlaxCrossAttnDownBlock2D",
"FlaxCrossAttnDownBlock2D", "FlaxCrossAttnDownBlock2D", "FlaxDownBlock2D"
up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D",)`):
The tuple of upsample blocks to use. The corresponding class names will be: "FlaxUpBlock2D",
"FlaxCrossAttnUpBlock2D", "FlaxCrossAttnUpBlock2D", "FlaxCrossAttnUpBlock2D"
block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
The tuple of output channels for each block.
layers_per_block (`int`, *optional*, defaults to 2):
The number of layers per block.
attention_head_dim (`int`, *optional*, defaults to 8):
The dimension of the attention heads.
        cross_attention_dim (`int`, *optional*, defaults to 1280):
The dimension of the cross attention features.
dropout (`float`, *optional*, defaults to 0):
Dropout probability for down, up and bottleneck blocks.
"""
sample_size: int = 32
in_channels: int = 4
out_channels: int = 4
down_block_types: Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
layers_per_block: int = 2
attention_head_dim: int = 8
cross_attention_dim: int = 1280
dropout: float = 0.0
dtype: jnp.dtype = jnp.float32
freq_shift: int = 0
def init_weights(self, rng: jax.random.PRNGKey) -> FrozenDict:
# init input tensors
sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
sample = jnp.zeros(sample_shape, dtype=jnp.float32)
timesteps = jnp.ones((1,), dtype=jnp.int32)
encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
params_rng, dropout_rng = jax.random.split(rng)
rngs = {"params": params_rng, "dropout": dropout_rng}
return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
def setup(self):
block_out_channels = self.block_out_channels
time_embed_dim = block_out_channels[0] * 4
# input
self.conv_in = nn.Conv(
block_out_channels[0],
kernel_size=(3, 3),
strides=(1, 1),
padding=((1, 1), (1, 1)),
dtype=self.dtype,
)
# time
self.time_proj = FlaxTimesteps(block_out_channels[0], freq_shift=self.config.freq_shift)
self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)
# down
down_blocks = []
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types):
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
if down_block_type == "CrossAttnDownBlock2D":
down_block = FlaxCrossAttnDownBlock2D(
in_channels=input_channel,
out_channels=output_channel,
dropout=self.dropout,
num_layers=self.layers_per_block,
attn_num_head_channels=self.attention_head_dim,
add_downsample=not is_final_block,
dtype=self.dtype,
)
else:
down_block = FlaxDownBlock2D(
in_channels=input_channel,
out_channels=output_channel,
dropout=self.dropout,
num_layers=self.layers_per_block,
add_downsample=not is_final_block,
dtype=self.dtype,
)
down_blocks.append(down_block)
self.down_blocks = down_blocks
# mid
self.mid_block = FlaxUNetMidBlock2DCrossAttn(
in_channels=block_out_channels[-1],
dropout=self.dropout,
attn_num_head_channels=self.attention_head_dim,
dtype=self.dtype,
)
# up
up_blocks = []
reversed_block_out_channels = list(reversed(block_out_channels))
output_channel = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types):
prev_output_channel = output_channel
output_channel = reversed_block_out_channels[i]
input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
is_final_block = i == len(block_out_channels) - 1
if up_block_type == "CrossAttnUpBlock2D":
up_block = FlaxCrossAttnUpBlock2D(
in_channels=input_channel,
out_channels=output_channel,
prev_output_channel=prev_output_channel,
num_layers=self.layers_per_block + 1,
attn_num_head_channels=self.attention_head_dim,
add_upsample=not is_final_block,
dropout=self.dropout,
dtype=self.dtype,
)
else:
up_block = FlaxUpBlock2D(
in_channels=input_channel,
out_channels=output_channel,
prev_output_channel=prev_output_channel,
num_layers=self.layers_per_block + 1,
add_upsample=not is_final_block,
dropout=self.dropout,
dtype=self.dtype,
)
up_blocks.append(up_block)
prev_output_channel = output_channel
self.up_blocks = up_blocks
# out
self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
self.conv_out = nn.Conv(
self.out_channels,
kernel_size=(3, 3),
strides=(1, 1),
padding=((1, 1), (1, 1)),
dtype=self.dtype,
)
def __call__(
self,
sample,
timesteps,
encoder_hidden_states,
return_dict: bool = True,
train: bool = False,
) -> Union[FlaxUNet2DConditionOutput, Tuple]:
r"""
Args:
            sample (`jnp.ndarray`): (batch, channel, height, width) noisy inputs tensor
            timesteps (`jnp.ndarray` or `float` or `int`): timesteps
            encoder_hidden_states (`jnp.ndarray`): (batch, sequence_length, hidden_size) encoder hidden states
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`models.unet_2d_condition_flax.FlaxUNet2DConditionOutput`] instead of a
plain tuple.
train (`bool`, *optional*, defaults to `False`):
Use deterministic functions and disable dropout when not training.
Returns:
[`~models.unet_2d_condition_flax.FlaxUNet2DConditionOutput`] or `tuple`:
[`~models.unet_2d_condition_flax.FlaxUNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`.
When returning a tuple, the first element is the sample tensor.
"""
# 1. time
if not isinstance(timesteps, jnp.ndarray):
timesteps = jnp.array([timesteps], dtype=jnp.int32)
elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
timesteps = timesteps.astype(dtype=jnp.float32)
timesteps = jnp.expand_dims(timesteps, 0)
t_emb = self.time_proj(timesteps)
t_emb = self.time_embedding(t_emb)
# 2. pre-process
sample = jnp.transpose(sample, (0, 2, 3, 1))
sample = self.conv_in(sample)
# 3. down
down_block_res_samples = (sample,)
for down_block in self.down_blocks:
if isinstance(down_block, FlaxCrossAttnDownBlock2D):
sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
else:
sample, res_samples = down_block(sample, t_emb, deterministic=not train)
down_block_res_samples += res_samples
# 4. mid
sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
# 5. up
for up_block in self.up_blocks:
res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(up_block, FlaxCrossAttnUpBlock2D):
sample = up_block(
sample,
temb=t_emb,
encoder_hidden_states=encoder_hidden_states,
res_hidden_states_tuple=res_samples,
deterministic=not train,
)
else:
sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)
# 6. post-process
sample = self.conv_norm_out(sample)
sample = nn.silu(sample)
sample = self.conv_out(sample)
sample = jnp.transpose(sample, (0, 3, 1, 2))
if not return_dict:
return (sample,)
return FlaxUNet2DConditionOutput(sample=sample)
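def _demo_flax_unet_init():
    # Illustrative sketch added for clarity (assumed PRNG seed, default config):
    # materialize the parameters from the dummy inputs built by init_weights.
    # A forward pass would then follow the signature shown in __call__:
    # model.apply({"params": params}, sample, timesteps, encoder_hidden_states).
    model = FlaxUNet2DConditionModel()
    return model.init_weights(jax.random.PRNGKey(0))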
|
diffusers-main
|
src/diffusers/models/unet_2d_condition_flax.py
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
import torch
from torch import nn
def get_timestep_embedding(
timesteps: torch.Tensor,
embedding_dim: int,
flip_sin_to_cos: bool = False,
downscale_freq_shift: float = 1,
scale: float = 1,
max_period: int = 10000,
):
"""
This matches the implementation in Denoising Diffusion Probabilistic Models: Create sinusoidal timestep embeddings.
:param timesteps: a 1-D Tensor of N indices, one per batch element.
These may be fractional.
:param embedding_dim: the dimension of the output. :param max_period: controls the minimum frequency of the
embeddings. :return: an [N x dim] Tensor of positional embeddings.
"""
assert len(timesteps.shape) == 1, "Timesteps should be a 1d-array"
half_dim = embedding_dim // 2
exponent = -math.log(max_period) * torch.arange(
start=0, end=half_dim, dtype=torch.float32, device=timesteps.device
)
exponent = exponent / (half_dim - downscale_freq_shift)
emb = torch.exp(exponent)
emb = timesteps[:, None].float() * emb[None, :]
# scale embeddings
emb = scale * emb
# concat sine and cosine embeddings
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=-1)
# flip sine and cosine embeddings
if flip_sin_to_cos:
emb = torch.cat([emb[:, half_dim:], emb[:, :half_dim]], dim=-1)
# zero pad
if embedding_dim % 2 == 1:
emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
return emb
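def _demo_timestep_embedding():
    # Illustrative sketch with assumed values: three integer timesteps mapped
    # to a 16-dimensional sinusoidal embedding, half sine and half cosine.
    timesteps = torch.tensor([0, 10, 100])
    emb = get_timestep_embedding(timesteps, embedding_dim=16)
    assert emb.shape == (3, 16)
    return emb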
class TimestepEmbedding(nn.Module):
def __init__(self, channel: int, time_embed_dim: int, act_fn: str = "silu"):
super().__init__()
self.linear_1 = nn.Linear(channel, time_embed_dim)
self.act = None
if act_fn == "silu":
self.act = nn.SiLU()
self.linear_2 = nn.Linear(time_embed_dim, time_embed_dim)
def forward(self, sample):
sample = self.linear_1(sample)
if self.act is not None:
sample = self.act(sample)
sample = self.linear_2(sample)
return sample
class Timesteps(nn.Module):
def __init__(self, num_channels: int, flip_sin_to_cos: bool, downscale_freq_shift: float):
super().__init__()
self.num_channels = num_channels
self.flip_sin_to_cos = flip_sin_to_cos
self.downscale_freq_shift = downscale_freq_shift
def forward(self, timesteps):
t_emb = get_timestep_embedding(
timesteps,
self.num_channels,
flip_sin_to_cos=self.flip_sin_to_cos,
downscale_freq_shift=self.downscale_freq_shift,
)
return t_emb
class GaussianFourierProjection(nn.Module):
"""Gaussian Fourier embeddings for noise levels."""
def __init__(self, embedding_size: int = 256, scale: float = 1.0):
super().__init__()
self.weight = nn.Parameter(torch.randn(embedding_size) * scale, requires_grad=False)
# to delete later
self.W = nn.Parameter(torch.randn(embedding_size) * scale, requires_grad=False)
self.weight = self.W
def forward(self, x):
x = torch.log(x)
x_proj = x[:, None] * self.weight[None, :] * 2 * np.pi
out = torch.cat([torch.sin(x_proj), torch.cos(x_proj)], dim=-1)
return out
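def _demo_gaussian_fourier_projection():
    # Illustrative sketch with assumed noise levels: continuous noise levels
    # are log-transformed, projected with fixed random frequencies, and
    # expanded into sine/cosine pairs, so the output width is 2 * embedding_size.
    proj = GaussianFourierProjection(embedding_size=16)
    sigmas = torch.tensor([0.5, 1.0, 2.0])
    return proj(sigmas)  # -> (3, 32)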
|
diffusers-main
|
src/diffusers/models/embeddings.py
|
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
class Upsample2D(nn.Module):
"""
An upsampling layer with an optional convolution.
Parameters:
channels: channels in the inputs and outputs.
use_conv: a bool determining if a convolution is applied.
        use_conv_transpose: a bool determining if a transposed convolution is used instead of interpolation.
        out_channels: optional number of output channels; defaults to `channels`.
"""
def __init__(self, channels, use_conv=False, use_conv_transpose=False, out_channels=None, name="conv"):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.use_conv_transpose = use_conv_transpose
self.name = name
conv = None
if use_conv_transpose:
conv = nn.ConvTranspose2d(channels, self.out_channels, 4, 2, 1)
elif use_conv:
conv = nn.Conv2d(self.channels, self.out_channels, 3, padding=1)
# TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed
if name == "conv":
self.conv = conv
else:
self.Conv2d_0 = conv
def forward(self, hidden_states, output_size=None):
assert hidden_states.shape[1] == self.channels
if self.use_conv_transpose:
return self.conv(hidden_states)
        # Cast to float32 as the 'upsample_nearest2d_out_frame' op does not support bfloat16
# TODO(Suraj): Remove this cast once the issue is fixed in PyTorch
# https://github.com/pytorch/pytorch/issues/86679
dtype = hidden_states.dtype
if dtype == torch.bfloat16:
hidden_states = hidden_states.to(torch.float32)
# if `output_size` is passed we force the interpolation output
# size and do not make use of `scale_factor=2`
if output_size is None:
hidden_states = F.interpolate(hidden_states, scale_factor=2.0, mode="nearest")
else:
hidden_states = F.interpolate(hidden_states, size=output_size, mode="nearest")
# If the input is bfloat16, we cast back to bfloat16
if dtype == torch.bfloat16:
hidden_states = hidden_states.to(dtype)
# TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed
if self.use_conv:
if self.name == "conv":
hidden_states = self.conv(hidden_states)
else:
hidden_states = self.Conv2d_0(hidden_states)
return hidden_states
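def _demo_upsample2d():
    # Illustrative sketch with assumed shapes: nearest-neighbor 2x
    # interpolation followed by a 3x3 convolution that keeps the channel count.
    up = Upsample2D(channels=8, use_conv=True)
    hidden_states = torch.randn(1, 8, 16, 16)
    return up(hidden_states)  # -> (1, 8, 32, 32)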
class Downsample2D(nn.Module):
"""
A downsampling layer with an optional convolution.
Parameters:
channels: channels in the inputs and outputs.
use_conv: a bool determining if a convolution is applied.
        out_channels: optional number of output channels; defaults to `channels`.
        padding: padding applied to the strided convolution.
"""
def __init__(self, channels, use_conv=False, out_channels=None, padding=1, name="conv"):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.padding = padding
stride = 2
self.name = name
if use_conv:
conv = nn.Conv2d(self.channels, self.out_channels, 3, stride=stride, padding=padding)
else:
assert self.channels == self.out_channels
conv = nn.AvgPool2d(kernel_size=stride, stride=stride)
# TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed
if name == "conv":
self.Conv2d_0 = conv
self.conv = conv
elif name == "Conv2d_0":
self.conv = conv
else:
self.conv = conv
def forward(self, hidden_states):
assert hidden_states.shape[1] == self.channels
if self.use_conv and self.padding == 0:
pad = (0, 1, 0, 1)
hidden_states = F.pad(hidden_states, pad, mode="constant", value=0)
assert hidden_states.shape[1] == self.channels
hidden_states = self.conv(hidden_states)
return hidden_states
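def _demo_downsample2d():
    # Illustrative sketch with assumed shapes: a strided 3x3 convolution
    # halves the spatial resolution in one step.
    down = Downsample2D(channels=8, use_conv=True, padding=1)
    hidden_states = torch.randn(1, 8, 16, 16)
    return down(hidden_states)  # -> (1, 8, 8, 8)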
class FirUpsample2D(nn.Module):
def __init__(self, channels=None, out_channels=None, use_conv=False, fir_kernel=(1, 3, 3, 1)):
super().__init__()
out_channels = out_channels if out_channels else channels
if use_conv:
self.Conv2d_0 = nn.Conv2d(channels, out_channels, kernel_size=3, stride=1, padding=1)
self.use_conv = use_conv
self.fir_kernel = fir_kernel
self.out_channels = out_channels
def _upsample_2d(self, hidden_states, weight=None, kernel=None, factor=2, gain=1):
"""Fused `upsample_2d()` followed by `Conv2d()`.
        Padding is performed only once at the beginning, not between the operations. The fused op is considerably more
        efficient than performing the same calculation using standard PyTorch ops. It supports gradients of arbitrary
        order.
        Args:
            hidden_states: Input tensor of the shape `[N, C, H, W]`.
            weight: Weight tensor of the shape `[outChannels, inChannels, kernelH, kernelW]`. Grouped convolution can
                be performed by `inChannels = x.shape[1] // numGroups`.
            kernel: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`,
                which corresponds to nearest-neighbor upsampling.
            factor: Integer upsampling factor (default: 2).
            gain: Scaling factor for signal magnitude (default: 1.0).
        Returns:
            output: Tensor of the shape `[N, C, H * factor, W * factor]`, with the same datatype as `hidden_states`.
        """
assert isinstance(factor, int) and factor >= 1
# Setup filter kernel.
if kernel is None:
kernel = [1] * factor
kernel = torch.tensor(kernel, dtype=torch.float32)
if kernel.ndim == 1:
kernel = torch.outer(kernel, kernel)
kernel /= torch.sum(kernel)
kernel = kernel * (gain * (factor**2))
if self.use_conv:
convH = weight.shape[2]
convW = weight.shape[3]
inC = weight.shape[1]
pad_value = (kernel.shape[0] - factor) - (convW - 1)
stride = (factor, factor)
# Determine data dimensions.
output_shape = (
(hidden_states.shape[2] - 1) * factor + convH,
(hidden_states.shape[3] - 1) * factor + convW,
)
output_padding = (
output_shape[0] - (hidden_states.shape[2] - 1) * stride[0] - convH,
output_shape[1] - (hidden_states.shape[3] - 1) * stride[1] - convW,
)
assert output_padding[0] >= 0 and output_padding[1] >= 0
num_groups = hidden_states.shape[1] // inC
# Transpose weights.
weight = torch.reshape(weight, (num_groups, -1, inC, convH, convW))
weight = torch.flip(weight, dims=[3, 4]).permute(0, 2, 1, 3, 4)
weight = torch.reshape(weight, (num_groups * inC, -1, convH, convW))
inverse_conv = F.conv_transpose2d(
hidden_states, weight, stride=stride, output_padding=output_padding, padding=0
)
output = upfirdn2d_native(
inverse_conv,
                kernel.to(device=inverse_conv.device),
pad=((pad_value + 1) // 2 + factor - 1, pad_value // 2 + 1),
)
else:
pad_value = kernel.shape[0] - factor
output = upfirdn2d_native(
hidden_states,
                kernel.to(device=hidden_states.device),
up=factor,
pad=((pad_value + 1) // 2 + factor - 1, pad_value // 2),
)
return output
    def forward(self, hidden_states):
        if self.use_conv:
            hidden_states = self._upsample_2d(hidden_states, self.Conv2d_0.weight, kernel=self.fir_kernel)
            hidden_states = hidden_states + self.Conv2d_0.bias.reshape(1, -1, 1, 1)
        else:
            hidden_states = self._upsample_2d(hidden_states, kernel=self.fir_kernel, factor=2)
        return hidden_states
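# Illustrative sketch (hypothetical helper, not library API): with `use_conv=False`
# the block reduces to FIR-filtered nearest-neighbor upsampling by a factor of 2.
def _fir_upsample_demo():
    up = FirUpsample2D(channels=8, out_channels=8, use_conv=False)
    x = torch.randn(1, 8, 16, 16)
    assert up(x).shape == (1, 8, 32, 32)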
class FirDownsample2D(nn.Module):
def __init__(self, channels=None, out_channels=None, use_conv=False, fir_kernel=(1, 3, 3, 1)):
super().__init__()
out_channels = out_channels if out_channels else channels
if use_conv:
self.Conv2d_0 = nn.Conv2d(channels, out_channels, kernel_size=3, stride=1, padding=1)
self.fir_kernel = fir_kernel
self.use_conv = use_conv
self.out_channels = out_channels
def _downsample_2d(self, hidden_states, weight=None, kernel=None, factor=2, gain=1):
"""Fused `Conv2d()` followed by `downsample_2d()`.
Padding is performed only once at the beginning, not between the operations. The fused op is considerably more
efficient than performing the same calculation using standard TensorFlow ops. It supports gradients of
arbitrary order.
Args:
hidden_states: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
weight:
Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`. Grouped convolution can be
performed by `inChannels = x.shape[0] // numGroups`.
kernel: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] *
factor`, which corresponds to average pooling.
factor: Integer downsampling factor (default: 2).
gain: Scaling factor for signal magnitude (default: 1.0).
Returns:
            output: Tensor of the shape `[N, C, H // factor, W // factor]` or `[N, H // factor, W // factor, C]`, and
                same datatype as `hidden_states`.
"""
assert isinstance(factor, int) and factor >= 1
if kernel is None:
kernel = [1] * factor
# setup kernel
kernel = torch.tensor(kernel, dtype=torch.float32)
if kernel.ndim == 1:
kernel = torch.outer(kernel, kernel)
kernel /= torch.sum(kernel)
kernel = kernel * gain
if self.use_conv:
_, _, convH, convW = weight.shape
pad_value = (kernel.shape[0] - factor) + (convW - 1)
stride_value = [factor, factor]
upfirdn_input = upfirdn2d_native(
hidden_states,
                kernel.to(device=hidden_states.device),
pad=((pad_value + 1) // 2, pad_value // 2),
)
output = F.conv2d(upfirdn_input, weight, stride=stride_value, padding=0)
else:
pad_value = kernel.shape[0] - factor
output = upfirdn2d_native(
hidden_states,
                kernel.to(device=hidden_states.device),
down=factor,
pad=((pad_value + 1) // 2, pad_value // 2),
)
return output
def forward(self, hidden_states):
if self.use_conv:
downsample_input = self._downsample_2d(hidden_states, weight=self.Conv2d_0.weight, kernel=self.fir_kernel)
hidden_states = downsample_input + self.Conv2d_0.bias.reshape(1, -1, 1, 1)
else:
hidden_states = self._downsample_2d(hidden_states, kernel=self.fir_kernel, factor=2)
return hidden_states
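# Illustrative sketch (hypothetical helper): an up-then-down round trip with the
# default (1, 3, 3, 1) FIR kernel restores the original spatial size; the values
# are low-pass filtered, not recovered exactly.
def _fir_roundtrip_demo():
    up = FirUpsample2D(channels=4, use_conv=False)
    down = FirDownsample2D(channels=4, use_conv=False)
    x = torch.randn(1, 4, 8, 8)
    assert down(up(x)).shape == x.shape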
class ResnetBlock2D(nn.Module):
def __init__(
self,
*,
in_channels,
out_channels=None,
conv_shortcut=False,
dropout=0.0,
temb_channels=512,
groups=32,
groups_out=None,
pre_norm=True,
eps=1e-6,
non_linearity="swish",
time_embedding_norm="default",
kernel=None,
output_scale_factor=1.0,
use_in_shortcut=None,
up=False,
down=False,
):
super().__init__()
        # `pre_norm` is accepted for config compatibility, but this block always applies norm first
        self.pre_norm = True
self.in_channels = in_channels
out_channels = in_channels if out_channels is None else out_channels
self.out_channels = out_channels
self.use_conv_shortcut = conv_shortcut
self.time_embedding_norm = time_embedding_norm
self.up = up
self.down = down
self.output_scale_factor = output_scale_factor
if groups_out is None:
groups_out = groups
self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True)
self.conv1 = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
if temb_channels is not None:
self.time_emb_proj = torch.nn.Linear(temb_channels, out_channels)
else:
self.time_emb_proj = None
self.norm2 = torch.nn.GroupNorm(num_groups=groups_out, num_channels=out_channels, eps=eps, affine=True)
self.dropout = torch.nn.Dropout(dropout)
self.conv2 = torch.nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
if non_linearity == "swish":
self.nonlinearity = lambda x: F.silu(x)
elif non_linearity == "mish":
self.nonlinearity = Mish()
elif non_linearity == "silu":
self.nonlinearity = nn.SiLU()
self.upsample = self.downsample = None
if self.up:
if kernel == "fir":
fir_kernel = (1, 3, 3, 1)
self.upsample = lambda x: upsample_2d(x, kernel=fir_kernel)
elif kernel == "sde_vp":
self.upsample = partial(F.interpolate, scale_factor=2.0, mode="nearest")
else:
self.upsample = Upsample2D(in_channels, use_conv=False)
elif self.down:
if kernel == "fir":
fir_kernel = (1, 3, 3, 1)
self.downsample = lambda x: downsample_2d(x, kernel=fir_kernel)
elif kernel == "sde_vp":
self.downsample = partial(F.avg_pool2d, kernel_size=2, stride=2)
else:
self.downsample = Downsample2D(in_channels, use_conv=False, padding=1, name="op")
self.use_in_shortcut = self.in_channels != self.out_channels if use_in_shortcut is None else use_in_shortcut
self.conv_shortcut = None
if self.use_in_shortcut:
self.conv_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
def forward(self, input_tensor, temb):
hidden_states = input_tensor
hidden_states = self.norm1(hidden_states)
hidden_states = self.nonlinearity(hidden_states)
if self.upsample is not None:
input_tensor = self.upsample(input_tensor)
hidden_states = self.upsample(hidden_states)
elif self.downsample is not None:
input_tensor = self.downsample(input_tensor)
hidden_states = self.downsample(hidden_states)
hidden_states = self.conv1(hidden_states)
if temb is not None:
temb = self.time_emb_proj(self.nonlinearity(temb))[:, :, None, None]
hidden_states = hidden_states + temb
hidden_states = self.norm2(hidden_states)
hidden_states = self.nonlinearity(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.conv2(hidden_states)
if self.conv_shortcut is not None:
input_tensor = self.conv_shortcut(input_tensor)
output_tensor = (input_tensor + hidden_states) / self.output_scale_factor
return output_tensor
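# Illustrative sketch (hypothetical helper): a channel-changing residual block.
# The 1x1 `conv_shortcut` projects the input so the residual addition matches.
def _resnet_block_demo():
    block = ResnetBlock2D(in_channels=32, out_channels=64, temb_channels=128)
    x = torch.randn(2, 32, 16, 16)
    temb = torch.randn(2, 128)  # time embedding, projected and added after conv1
    assert block(x, temb).shape == (2, 64, 16, 16)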
class Mish(torch.nn.Module):
def forward(self, hidden_states):
return hidden_states * torch.tanh(torch.nn.functional.softplus(hidden_states))
def upsample_2d(hidden_states, kernel=None, factor=2, gain=1):
r"""Upsample2D a batch of 2D images with the given filter.
Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]` and upsamples each image with the given
filter. The filter is normalized so that if the input pixels are constant, they will be scaled by the specified
`gain`. Pixels outside the image are assumed to be zero, and the filter is padded with zeros so that its shape is
    a multiple of the upsampling factor.
Args:
hidden_states: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
kernel: FIR filter of the shape `[firH, firW]` or `[firN]`
(separable). The default is `[1] * factor`, which corresponds to nearest-neighbor upsampling.
factor: Integer upsampling factor (default: 2).
gain: Scaling factor for signal magnitude (default: 1.0).
Returns:
output: Tensor of the shape `[N, C, H * factor, W * factor]`
"""
assert isinstance(factor, int) and factor >= 1
if kernel is None:
kernel = [1] * factor
kernel = torch.tensor(kernel, dtype=torch.float32)
if kernel.ndim == 1:
kernel = torch.outer(kernel, kernel)
kernel /= torch.sum(kernel)
kernel = kernel * (gain * (factor**2))
pad_value = kernel.shape[0] - factor
output = upfirdn2d_native(
hidden_states,
kernel.to(device=hidden_states.device),
up=factor,
pad=((pad_value + 1) // 2 + factor - 1, pad_value // 2),
)
return output
def downsample_2d(hidden_states, kernel=None, factor=2, gain=1):
r"""Downsample2D a batch of 2D images with the given filter.
Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]` and downsamples each image with the
given filter. The filter is normalized so that if the input pixels are constant, they will be scaled by the
specified `gain`. Pixels outside the image are assumed to be zero, and the filter is padded with zeros so that its
shape is a multiple of the downsampling factor.
Args:
hidden_states: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
kernel: FIR filter of the shape `[firH, firW]` or `[firN]`
(separable). The default is `[1] * factor`, which corresponds to average pooling.
factor: Integer downsampling factor (default: 2).
gain: Scaling factor for signal magnitude (default: 1.0).
Returns:
output: Tensor of the shape `[N, C, H // factor, W // factor]`
"""
assert isinstance(factor, int) and factor >= 1
if kernel is None:
kernel = [1] * factor
kernel = torch.tensor(kernel, dtype=torch.float32)
if kernel.ndim == 1:
kernel = torch.outer(kernel, kernel)
kernel /= torch.sum(kernel)
kernel = kernel * gain
pad_value = kernel.shape[0] - factor
output = upfirdn2d_native(
hidden_states, kernel.to(device=hidden_states.device), down=factor, pad=((pad_value + 1) // 2, pad_value // 2)
)
return output
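# Illustrative sketch (hypothetical helper): the functional forms used by
# ResnetBlock2D when `kernel == "fir"`; factor 2 doubles or halves H and W.
def _fir_functional_demo():
    x = torch.randn(1, 3, 8, 8)
    assert upsample_2d(x, kernel=(1, 3, 3, 1)).shape == (1, 3, 16, 16)
    assert downsample_2d(x, kernel=(1, 3, 3, 1)).shape == (1, 3, 4, 4)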
def upfirdn2d_native(tensor, kernel, up=1, down=1, pad=(0, 0)):
up_x = up_y = up
down_x = down_y = down
pad_x0 = pad_y0 = pad[0]
pad_x1 = pad_y1 = pad[1]
_, channel, in_h, in_w = tensor.shape
tensor = tensor.reshape(-1, in_h, in_w, 1)
_, in_h, in_w, minor = tensor.shape
kernel_h, kernel_w = kernel.shape
out = tensor.view(-1, in_h, 1, in_w, 1, minor)
# Temporary workaround for mps specific issue: https://github.com/pytorch/pytorch/issues/84535
if tensor.device.type == "mps":
out = out.to("cpu")
out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
out = out.view(-1, in_h * up_y, in_w * up_x, minor)
out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)])
out = out.to(tensor.device) # Move back to mps if necessary
out = out[
:,
max(-pad_y0, 0) : out.shape[1] - max(-pad_y1, 0),
max(-pad_x0, 0) : out.shape[2] - max(-pad_x1, 0),
:,
]
out = out.permute(0, 3, 1, 2)
out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1])
w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
out = F.conv2d(out, w)
out = out.reshape(
-1,
minor,
in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,
in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1,
)
out = out.permute(0, 2, 3, 1)
out = out[:, ::down_y, ::down_x, :]
out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
return out.view(-1, channel, out_h, out_w)
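# Illustrative sketch (hypothetical helper): with no resampling and a unit 1x1
# kernel, upfirdn2d_native degenerates to the identity, which makes the
# pad/convolve/crop pipeline above easy to sanity-check.
def _upfirdn_identity_demo():
    x = torch.randn(1, 2, 5, 5)
    y = upfirdn2d_native(x, torch.ones(1, 1), up=1, down=1, pad=(0, 0))
    assert torch.allclose(x, y)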
|
diffusers-main
|
src/diffusers/models/resnet.py
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..modeling_utils import ModelMixin
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .unet_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class UNet2DOutput(BaseOutput):
"""
Args:
sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Hidden states output. Output of last layer of model.
"""
sample: torch.FloatTensor
class UNet2DModel(ModelMixin, ConfigMixin):
r"""
    UNet2DModel is a 2D UNet model that takes a noisy sample and a timestep and returns a sample-shaped output.
This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library
implements for all the model (such as downloading or saving, etc.)
Parameters:
        sample_size (`int`, *optional*): Input sample size.
in_channels (`int`, *optional*, defaults to 3): Number of channels in the input image.
out_channels (`int`, *optional*, defaults to 3): Number of channels in the output.
center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample.
time_embedding_type (`str`, *optional*, defaults to `"positional"`): Type of time embedding to use.
freq_shift (`int`, *optional*, defaults to 0): Frequency shift for fourier time embedding.
        flip_sin_to_cos (`bool`, *optional*, defaults to `True`):
            Whether to flip sin to cos for fourier time embedding.
        down_block_types (`Tuple[str]`, *optional*, defaults to `("DownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D")`):
            Tuple of downsample block types.
        up_block_types (`Tuple[str]`, *optional*, defaults to `("AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "UpBlock2D")`):
            Tuple of upsample block types.
        block_out_channels (`Tuple[int]`, *optional*, defaults to `(224, 448, 672, 896)`):
            Tuple of block output channels.
layers_per_block (`int`, *optional*, defaults to `2`): The number of layers per block.
mid_block_scale_factor (`float`, *optional*, defaults to `1`): The scale factor for the mid block.
downsample_padding (`int`, *optional*, defaults to `1`): The padding for the downsample convolution.
act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
attention_head_dim (`int`, *optional*, defaults to `8`): The attention head dimension.
norm_num_groups (`int`, *optional*, defaults to `32`): The number of groups for the normalization.
norm_eps (`float`, *optional*, defaults to `1e-5`): The epsilon for the normalization.
"""
@register_to_config
def __init__(
self,
sample_size: Optional[int] = None,
in_channels: int = 3,
out_channels: int = 3,
center_input_sample: bool = False,
time_embedding_type: str = "positional",
freq_shift: int = 0,
flip_sin_to_cos: bool = True,
down_block_types: Tuple[str] = ("DownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D"),
up_block_types: Tuple[str] = ("AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "UpBlock2D"),
block_out_channels: Tuple[int] = (224, 448, 672, 896),
layers_per_block: int = 2,
mid_block_scale_factor: float = 1,
downsample_padding: int = 1,
act_fn: str = "silu",
attention_head_dim: int = 8,
norm_num_groups: int = 32,
norm_eps: float = 1e-5,
):
super().__init__()
self.sample_size = sample_size
time_embed_dim = block_out_channels[0] * 4
# input
self.conv_in = nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))
# time
if time_embedding_type == "fourier":
self.time_proj = GaussianFourierProjection(embedding_size=block_out_channels[0], scale=16)
timestep_input_dim = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
timestep_input_dim = block_out_channels[0]
self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
self.down_blocks = nn.ModuleList([])
self.mid_block = None
self.up_blocks = nn.ModuleList([])
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
down_block = get_down_block(
down_block_type,
num_layers=layers_per_block,
in_channels=input_channel,
out_channels=output_channel,
temb_channels=time_embed_dim,
add_downsample=not is_final_block,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
attn_num_head_channels=attention_head_dim,
downsample_padding=downsample_padding,
)
self.down_blocks.append(down_block)
# mid
self.mid_block = UNetMidBlock2D(
in_channels=block_out_channels[-1],
temb_channels=time_embed_dim,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
output_scale_factor=mid_block_scale_factor,
resnet_time_scale_shift="default",
attn_num_head_channels=attention_head_dim,
resnet_groups=norm_num_groups,
)
# up
reversed_block_out_channels = list(reversed(block_out_channels))
output_channel = reversed_block_out_channels[0]
for i, up_block_type in enumerate(up_block_types):
prev_output_channel = output_channel
output_channel = reversed_block_out_channels[i]
input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
is_final_block = i == len(block_out_channels) - 1
up_block = get_up_block(
up_block_type,
num_layers=layers_per_block + 1,
in_channels=input_channel,
out_channels=output_channel,
prev_output_channel=prev_output_channel,
temb_channels=time_embed_dim,
add_upsample=not is_final_block,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
attn_num_head_channels=attention_head_dim,
)
self.up_blocks.append(up_block)
prev_output_channel = output_channel
# out
num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=num_groups_out, eps=norm_eps)
self.conv_act = nn.SiLU()
self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)
def forward(
self,
sample: torch.FloatTensor,
timestep: Union[torch.Tensor, float, int],
return_dict: bool = True,
) -> Union[UNet2DOutput, Tuple]:
r"""
Args:
sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor
            timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~models.unet_2d.UNet2DOutput`] instead of a plain tuple.
Returns:
[`~models.unet_2d.UNet2DOutput`] or `tuple`: [`~models.unet_2d.UNet2DOutput`] if `return_dict` is True,
otherwise a `tuple`. When returning a tuple, the first element is the sample tensor.
"""
# 0. center input if necessary
if self.config.center_input_sample:
sample = 2 * sample - 1.0
# 1. time
timesteps = timestep
if not torch.is_tensor(timesteps):
timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
timesteps = timesteps[None].to(sample.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
timesteps = timesteps * torch.ones(sample.shape[0], dtype=timesteps.dtype, device=timesteps.device)
t_emb = self.time_proj(timesteps)
emb = self.time_embedding(t_emb)
# 2. pre-process
skip_sample = sample
sample = self.conv_in(sample)
# 3. down
down_block_res_samples = (sample,)
for downsample_block in self.down_blocks:
if hasattr(downsample_block, "skip_conv"):
sample, res_samples, skip_sample = downsample_block(
hidden_states=sample, temb=emb, skip_sample=skip_sample
)
else:
sample, res_samples = downsample_block(hidden_states=sample, temb=emb)
down_block_res_samples += res_samples
# 4. mid
sample = self.mid_block(sample, emb)
# 5. up
skip_sample = None
for upsample_block in self.up_blocks:
res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]
if hasattr(upsample_block, "skip_conv"):
sample, skip_sample = upsample_block(sample, res_samples, emb, skip_sample)
else:
sample = upsample_block(sample, res_samples, emb)
# 6. post-process
# make sure hidden states is in float32
# when running in half-precision
sample = self.conv_norm_out(sample.float()).type(sample.dtype)
sample = self.conv_act(sample)
sample = self.conv_out(sample)
if skip_sample is not None:
sample += skip_sample
if self.config.time_embedding_type == "fourier":
timesteps = timesteps.reshape((sample.shape[0], *([1] * len(sample.shape[1:]))))
sample = sample / timesteps
if not return_dict:
return (sample,)
return UNet2DOutput(sample=sample)
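# Illustrative sketch (hypothetical helper, small config chosen for speed):
# an end-to-end pass through UNet2DModel; the output matches the input shape.
def _unet2d_demo():
    model = UNet2DModel(
        sample_size=16,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownBlock2D", "AttnDownBlock2D"),
        up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        block_out_channels=(32, 64),  # must stay divisible by norm_num_groups
    )
    noisy = torch.randn(1, 3, 16, 16)
    assert model(noisy, timestep=10).sample.shape == noisy.shape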
|
diffusers-main
|
src/diffusers/models/unet_2d.py
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import flax.linen as nn
import jax.numpy as jnp
# This is like models.embeddings.get_timestep_embedding (PyTorch) but
# less general (only handles the case we currently need).
def get_sinusoidal_embeddings(timesteps, embedding_dim, freq_shift: float = 1):
"""
This matches the implementation in Denoising Diffusion Probabilistic Models: Create sinusoidal timestep embeddings.
:param timesteps: a 1-D tensor of N indices, one per batch element.
These may be fractional.
    :param embedding_dim: the dimension of the output.
    :param freq_shift: shift applied to the denominator controlling the minimum frequency of the embeddings.
    :return: an [N x dim] tensor of positional embeddings.
"""
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - freq_shift)
emb = jnp.exp(jnp.arange(half_dim) * -emb)
emb = timesteps[:, None] * emb[None, :]
emb = jnp.concatenate([jnp.cos(emb), jnp.sin(emb)], -1)
return emb
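# Illustrative sketch (hypothetical helper): the embedding is the cosine half
# concatenated with the sine half, so the output width equals `embedding_dim`.
def _sinusoidal_demo():
    t = jnp.arange(4, dtype=jnp.float32)
    emb = get_sinusoidal_embeddings(t, embedding_dim=8)
    assert emb.shape == (4, 8)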
class FlaxTimestepEmbedding(nn.Module):
r"""
Time step Embedding Module. Learns embeddings for input time steps.
Args:
time_embed_dim (`int`, *optional*, defaults to `32`):
Time step embedding dimension
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
Parameters `dtype`
"""
time_embed_dim: int = 32
dtype: jnp.dtype = jnp.float32
@nn.compact
def __call__(self, temb):
temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
temb = nn.silu(temb)
temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
return temb
class FlaxTimesteps(nn.Module):
r"""
Wrapper Module for sinusoidal Time step Embeddings as described in https://arxiv.org/abs/2006.11239
Args:
dim (`int`, *optional*, defaults to `32`):
Time step embedding dimension
"""
dim: int = 32
freq_shift: float = 1
@nn.compact
def __call__(self, timesteps):
return get_sinusoidal_embeddings(timesteps, self.dim, freq_shift=self.freq_shift)
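# Illustrative sketch (hypothetical helper): FlaxTimesteps is parameter-free,
# while FlaxTimestepEmbedding owns two Dense layers that must be initialized.
def _flax_time_embedding_demo():
    import jax  # not imported at module level in this file

    sinusoidal = FlaxTimesteps(dim=32).apply({}, jnp.array([0.0, 50.0]))
    emb_module = FlaxTimestepEmbedding(time_embed_dim=64)
    params = emb_module.init(jax.random.PRNGKey(0), sinusoidal)
    assert emb_module.apply(params, sinusoidal).shape == (2, 64)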
|
diffusers-main
|
src/diffusers/models/embeddings_flax.py
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# JAX implementation of VQGAN from taming-transformers https://github.com/CompVis/taming-transformers
import math
from functools import partial
from typing import Tuple
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..modeling_flax_utils import FlaxModelMixin
from ..utils import BaseOutput
@flax.struct.dataclass
class FlaxDecoderOutput(BaseOutput):
"""
Output of decoding method.
Args:
sample (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)`):
Decoded output sample of the model. Output of the last layer of the model.
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
Parameters `dtype`
"""
sample: jnp.ndarray
@flax.struct.dataclass
class FlaxAutoencoderKLOutput(BaseOutput):
"""
Output of AutoencoderKL encoding method.
Args:
latent_dist (`FlaxDiagonalGaussianDistribution`):
Encoded outputs of `Encoder` represented as the mean and logvar of `FlaxDiagonalGaussianDistribution`.
`FlaxDiagonalGaussianDistribution` allows for sampling latents from the distribution.
"""
latent_dist: "FlaxDiagonalGaussianDistribution"
class FlaxUpsample2D(nn.Module):
"""
Flax implementation of 2D Upsample layer
Args:
in_channels (`int`):
Input channels
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
Parameters `dtype`
"""
in_channels: int
dtype: jnp.dtype = jnp.float32
def setup(self):
self.conv = nn.Conv(
self.in_channels,
kernel_size=(3, 3),
strides=(1, 1),
padding=((1, 1), (1, 1)),
dtype=self.dtype,
)
def __call__(self, hidden_states):
batch, height, width, channels = hidden_states.shape
hidden_states = jax.image.resize(
hidden_states,
shape=(batch, height * 2, width * 2, channels),
method="nearest",
)
hidden_states = self.conv(hidden_states)
return hidden_states
class FlaxDownsample2D(nn.Module):
"""
Flax implementation of 2D Downsample layer
Args:
in_channels (`int`):
Input channels
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
Parameters `dtype`
"""
in_channels: int
dtype: jnp.dtype = jnp.float32
def setup(self):
self.conv = nn.Conv(
self.in_channels,
kernel_size=(3, 3),
strides=(2, 2),
padding="VALID",
dtype=self.dtype,
)
def __call__(self, hidden_states):
pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
hidden_states = jnp.pad(hidden_states, pad_width=pad)
hidden_states = self.conv(hidden_states)
return hidden_states
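# Illustrative sketch (hypothetical helper): note the NHWC layout, unlike the
# PyTorch modules elsewhere in this library; manual (0, 1) padding plus a
# VALID stride-2 conv halves H and W.
def _flax_downsample_demo():
    x = jnp.zeros((1, 16, 16, 8))
    module = FlaxDownsample2D(in_channels=8)
    params = module.init(jax.random.PRNGKey(0), x)
    assert module.apply(params, x).shape == (1, 8, 8, 8)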
class FlaxResnetBlock2D(nn.Module):
"""
Flax implementation of 2D Resnet Block.
Args:
in_channels (`int`):
Input channels
out_channels (`int`):
Output channels
dropout (:obj:`float`, *optional*, defaults to 0.0):
Dropout rate
groups (:obj:`int`, *optional*, defaults to `32`):
The number of groups to use for group norm.
use_nin_shortcut (:obj:`bool`, *optional*, defaults to `None`):
Whether to use `nin_shortcut`. This activates a new layer inside ResNet block
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
Parameters `dtype`
"""
in_channels: int
out_channels: int = None
dropout: float = 0.0
groups: int = 32
use_nin_shortcut: bool = None
dtype: jnp.dtype = jnp.float32
def setup(self):
out_channels = self.in_channels if self.out_channels is None else self.out_channels
self.norm1 = nn.GroupNorm(num_groups=self.groups, epsilon=1e-6)
self.conv1 = nn.Conv(
out_channels,
kernel_size=(3, 3),
strides=(1, 1),
padding=((1, 1), (1, 1)),
dtype=self.dtype,
)
self.norm2 = nn.GroupNorm(num_groups=self.groups, epsilon=1e-6)
self.dropout_layer = nn.Dropout(self.dropout)
self.conv2 = nn.Conv(
out_channels,
kernel_size=(3, 3),
strides=(1, 1),
padding=((1, 1), (1, 1)),
dtype=self.dtype,
)
use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
self.conv_shortcut = None
if use_nin_shortcut:
self.conv_shortcut = nn.Conv(
out_channels,
kernel_size=(1, 1),
strides=(1, 1),
padding="VALID",
dtype=self.dtype,
)
def __call__(self, hidden_states, deterministic=True):
residual = hidden_states
hidden_states = self.norm1(hidden_states)
hidden_states = nn.swish(hidden_states)
hidden_states = self.conv1(hidden_states)
hidden_states = self.norm2(hidden_states)
hidden_states = nn.swish(hidden_states)
hidden_states = self.dropout_layer(hidden_states, deterministic)
hidden_states = self.conv2(hidden_states)
if self.conv_shortcut is not None:
residual = self.conv_shortcut(residual)
return hidden_states + residual
class FlaxAttentionBlock(nn.Module):
r"""
Flax Convolutional based multi-head attention block for diffusion-based VAE.
Parameters:
channels (:obj:`int`):
Input channels
num_head_channels (:obj:`int`, *optional*, defaults to `None`):
Number of attention heads
num_groups (:obj:`int`, *optional*, defaults to `32`):
The number of groups to use for group norm
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
Parameters `dtype`
"""
channels: int
num_head_channels: int = None
num_groups: int = 32
dtype: jnp.dtype = jnp.float32
def setup(self):
self.num_heads = self.channels // self.num_head_channels if self.num_head_channels is not None else 1
dense = partial(nn.Dense, self.channels, dtype=self.dtype)
self.group_norm = nn.GroupNorm(num_groups=self.num_groups, epsilon=1e-6)
self.query, self.key, self.value = dense(), dense(), dense()
self.proj_attn = dense()
def transpose_for_scores(self, projection):
new_projection_shape = projection.shape[:-1] + (self.num_heads, -1)
# move heads to 2nd position (B, T, H * D) -> (B, T, H, D)
new_projection = projection.reshape(new_projection_shape)
# (B, T, H, D) -> (B, H, T, D)
new_projection = jnp.transpose(new_projection, (0, 2, 1, 3))
return new_projection
def __call__(self, hidden_states):
residual = hidden_states
batch, height, width, channels = hidden_states.shape
hidden_states = self.group_norm(hidden_states)
hidden_states = hidden_states.reshape((batch, height * width, channels))
query = self.query(hidden_states)
key = self.key(hidden_states)
value = self.value(hidden_states)
# transpose
query = self.transpose_for_scores(query)
key = self.transpose_for_scores(key)
value = self.transpose_for_scores(value)
# compute attentions
scale = 1 / math.sqrt(math.sqrt(self.channels / self.num_heads))
attn_weights = jnp.einsum("...qc,...kc->...qk", query * scale, key * scale)
attn_weights = nn.softmax(attn_weights, axis=-1)
# attend to values
hidden_states = jnp.einsum("...kc,...qk->...qc", value, attn_weights)
hidden_states = jnp.transpose(hidden_states, (0, 2, 1, 3))
new_hidden_states_shape = hidden_states.shape[:-2] + (self.channels,)
hidden_states = hidden_states.reshape(new_hidden_states_shape)
hidden_states = self.proj_attn(hidden_states)
hidden_states = hidden_states.reshape((batch, height, width, channels))
hidden_states = hidden_states + residual
return hidden_states
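# Illustrative sketch (hypothetical helper): self-attention over the flattened
# H*W positions with a residual connection, so the NHWC shape is preserved.
def _flax_attention_demo():
    x = jnp.zeros((1, 8, 8, 32))
    attn = FlaxAttentionBlock(channels=32, num_head_channels=8)  # 32 // 8 = 4 heads
    params = attn.init(jax.random.PRNGKey(0), x)
    assert attn.apply(params, x).shape == x.shape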
class FlaxDownEncoderBlock2D(nn.Module):
r"""
Flax Resnet blocks-based Encoder block for diffusion-based VAE.
Parameters:
in_channels (:obj:`int`):
Input channels
out_channels (:obj:`int`):
Output channels
dropout (:obj:`float`, *optional*, defaults to 0.0):
Dropout rate
num_layers (:obj:`int`, *optional*, defaults to 1):
Number of Resnet layer block
resnet_groups (:obj:`int`, *optional*, defaults to `32`):
The number of groups to use for the Resnet block group norm
add_downsample (:obj:`bool`, *optional*, defaults to `True`):
Whether to add downsample layer
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
Parameters `dtype`
"""
in_channels: int
out_channels: int
dropout: float = 0.0
num_layers: int = 1
resnet_groups: int = 32
add_downsample: bool = True
dtype: jnp.dtype = jnp.float32
def setup(self):
resnets = []
for i in range(self.num_layers):
in_channels = self.in_channels if i == 0 else self.out_channels
res_block = FlaxResnetBlock2D(
in_channels=in_channels,
out_channels=self.out_channels,
dropout=self.dropout,
groups=self.resnet_groups,
dtype=self.dtype,
)
resnets.append(res_block)
self.resnets = resnets
if self.add_downsample:
self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)
def __call__(self, hidden_states, deterministic=True):
for resnet in self.resnets:
hidden_states = resnet(hidden_states, deterministic=deterministic)
if self.add_downsample:
hidden_states = self.downsamplers_0(hidden_states)
return hidden_states
class FlaxUpDecoderBlock2D(nn.Module):
r"""
Flax Resnet blocks-based Decoder block for diffusion-based VAE.
Parameters:
in_channels (:obj:`int`):
Input channels
out_channels (:obj:`int`):
Output channels
dropout (:obj:`float`, *optional*, defaults to 0.0):
Dropout rate
num_layers (:obj:`int`, *optional*, defaults to 1):
Number of Resnet layer block
resnet_groups (:obj:`int`, *optional*, defaults to `32`):
The number of groups to use for the Resnet block group norm
add_upsample (:obj:`bool`, *optional*, defaults to `True`):
Whether to add upsample layer
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
Parameters `dtype`
"""
in_channels: int
out_channels: int
dropout: float = 0.0
num_layers: int = 1
resnet_groups: int = 32
add_upsample: bool = True
dtype: jnp.dtype = jnp.float32
def setup(self):
resnets = []
for i in range(self.num_layers):
in_channels = self.in_channels if i == 0 else self.out_channels
res_block = FlaxResnetBlock2D(
in_channels=in_channels,
out_channels=self.out_channels,
dropout=self.dropout,
groups=self.resnet_groups,
dtype=self.dtype,
)
resnets.append(res_block)
self.resnets = resnets
if self.add_upsample:
self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)
def __call__(self, hidden_states, deterministic=True):
for resnet in self.resnets:
hidden_states = resnet(hidden_states, deterministic=deterministic)
if self.add_upsample:
hidden_states = self.upsamplers_0(hidden_states)
return hidden_states
class FlaxUNetMidBlock2D(nn.Module):
r"""
Flax Unet Mid-Block module.
Parameters:
in_channels (:obj:`int`):
Input channels
dropout (:obj:`float`, *optional*, defaults to 0.0):
Dropout rate
num_layers (:obj:`int`, *optional*, defaults to 1):
Number of Resnet layer block
resnet_groups (:obj:`int`, *optional*, defaults to `32`):
The number of groups to use for the Resnet and Attention block group norm
attn_num_head_channels (:obj:`int`, *optional*, defaults to `1`):
Number of attention heads for each attention block
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
Parameters `dtype`
"""
in_channels: int
dropout: float = 0.0
num_layers: int = 1
resnet_groups: int = 32
attn_num_head_channels: int = 1
dtype: jnp.dtype = jnp.float32
def setup(self):
resnet_groups = self.resnet_groups if self.resnet_groups is not None else min(self.in_channels // 4, 32)
# there is always at least one resnet
resnets = [
FlaxResnetBlock2D(
in_channels=self.in_channels,
out_channels=self.in_channels,
dropout=self.dropout,
groups=resnet_groups,
dtype=self.dtype,
)
]
attentions = []
for _ in range(self.num_layers):
attn_block = FlaxAttentionBlock(
channels=self.in_channels,
num_head_channels=self.attn_num_head_channels,
num_groups=resnet_groups,
dtype=self.dtype,
)
attentions.append(attn_block)
res_block = FlaxResnetBlock2D(
in_channels=self.in_channels,
out_channels=self.in_channels,
dropout=self.dropout,
groups=resnet_groups,
dtype=self.dtype,
)
resnets.append(res_block)
self.resnets = resnets
self.attentions = attentions
def __call__(self, hidden_states, deterministic=True):
hidden_states = self.resnets[0](hidden_states, deterministic=deterministic)
for attn, resnet in zip(self.attentions, self.resnets[1:]):
hidden_states = attn(hidden_states)
hidden_states = resnet(hidden_states, deterministic=deterministic)
return hidden_states
class FlaxEncoder(nn.Module):
r"""
Flax Implementation of VAE Encoder.
This model is a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module)
subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to
general usage and behavior.
Finally, this model supports inherent JAX features such as:
- [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
- [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
- [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
- [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
Parameters:
in_channels (:obj:`int`, *optional*, defaults to 3):
Input channels
out_channels (:obj:`int`, *optional*, defaults to 3):
Output channels
down_block_types (:obj:`Tuple[str]`, *optional*, defaults to `(DownEncoderBlock2D)`):
DownEncoder block type
        block_out_channels (:obj:`Tuple[int]`, *optional*, defaults to `(64,)`):
Tuple containing the number of output channels for each block
layers_per_block (:obj:`int`, *optional*, defaults to `2`):
Number of Resnet layer for each block
norm_num_groups (:obj:`int`, *optional*, defaults to `32`):
norm num group
act_fn (:obj:`str`, *optional*, defaults to `silu`):
Activation function
double_z (:obj:`bool`, *optional*, defaults to `False`):
Whether to double the last output channels
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
Parameters `dtype`
"""
in_channels: int = 3
out_channels: int = 3
down_block_types: Tuple[str] = ("DownEncoderBlock2D",)
block_out_channels: Tuple[int] = (64,)
layers_per_block: int = 2
norm_num_groups: int = 32
act_fn: str = "silu"
double_z: bool = False
dtype: jnp.dtype = jnp.float32
def setup(self):
block_out_channels = self.block_out_channels
# in
self.conv_in = nn.Conv(
block_out_channels[0],
kernel_size=(3, 3),
strides=(1, 1),
padding=((1, 1), (1, 1)),
dtype=self.dtype,
)
# downsampling
down_blocks = []
output_channel = block_out_channels[0]
for i, _ in enumerate(self.down_block_types):
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
down_block = FlaxDownEncoderBlock2D(
in_channels=input_channel,
out_channels=output_channel,
num_layers=self.layers_per_block,
resnet_groups=self.norm_num_groups,
add_downsample=not is_final_block,
dtype=self.dtype,
)
down_blocks.append(down_block)
self.down_blocks = down_blocks
# middle
self.mid_block = FlaxUNetMidBlock2D(
in_channels=block_out_channels[-1],
resnet_groups=self.norm_num_groups,
attn_num_head_channels=None,
dtype=self.dtype,
)
# end
conv_out_channels = 2 * self.out_channels if self.double_z else self.out_channels
self.conv_norm_out = nn.GroupNorm(num_groups=self.norm_num_groups, epsilon=1e-6)
self.conv_out = nn.Conv(
conv_out_channels,
kernel_size=(3, 3),
strides=(1, 1),
padding=((1, 1), (1, 1)),
dtype=self.dtype,
)
def __call__(self, sample, deterministic: bool = True):
# in
sample = self.conv_in(sample)
# downsampling
for block in self.down_blocks:
sample = block(sample, deterministic=deterministic)
# middle
sample = self.mid_block(sample, deterministic=deterministic)
# end
sample = self.conv_norm_out(sample)
sample = nn.swish(sample)
sample = self.conv_out(sample)
return sample
class FlaxDecoder(nn.Module):
r"""
Flax Implementation of VAE Decoder.
This model is a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module)
subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to
general usage and behavior.
Finally, this model supports inherent JAX features such as:
- [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
- [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
- [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
- [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
Parameters:
in_channels (:obj:`int`, *optional*, defaults to 3):
Input channels
out_channels (:obj:`int`, *optional*, defaults to 3):
Output channels
up_block_types (:obj:`Tuple[str]`, *optional*, defaults to `(UpDecoderBlock2D)`):
UpDecoder block type
        block_out_channels (:obj:`Tuple[int]`, *optional*, defaults to `(64,)`):
Tuple containing the number of output channels for each block
layers_per_block (:obj:`int`, *optional*, defaults to `2`):
Number of Resnet layer for each block
norm_num_groups (:obj:`int`, *optional*, defaults to `32`):
norm num group
act_fn (:obj:`str`, *optional*, defaults to `silu`):
Activation function
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
parameters `dtype`
"""
in_channels: int = 3
out_channels: int = 3
up_block_types: Tuple[str] = ("UpDecoderBlock2D",)
    block_out_channels: Tuple[int] = (64,)
layers_per_block: int = 2
norm_num_groups: int = 32
act_fn: str = "silu"
dtype: jnp.dtype = jnp.float32
def setup(self):
block_out_channels = self.block_out_channels
# z to block_in
self.conv_in = nn.Conv(
block_out_channels[-1],
kernel_size=(3, 3),
strides=(1, 1),
padding=((1, 1), (1, 1)),
dtype=self.dtype,
)
# middle
self.mid_block = FlaxUNetMidBlock2D(
in_channels=block_out_channels[-1],
resnet_groups=self.norm_num_groups,
attn_num_head_channels=None,
dtype=self.dtype,
)
# upsampling
reversed_block_out_channels = list(reversed(block_out_channels))
output_channel = reversed_block_out_channels[0]
up_blocks = []
for i, _ in enumerate(self.up_block_types):
prev_output_channel = output_channel
output_channel = reversed_block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
up_block = FlaxUpDecoderBlock2D(
in_channels=prev_output_channel,
out_channels=output_channel,
num_layers=self.layers_per_block + 1,
resnet_groups=self.norm_num_groups,
add_upsample=not is_final_block,
dtype=self.dtype,
)
up_blocks.append(up_block)
prev_output_channel = output_channel
self.up_blocks = up_blocks
# end
self.conv_norm_out = nn.GroupNorm(num_groups=self.norm_num_groups, epsilon=1e-6)
self.conv_out = nn.Conv(
self.out_channels,
kernel_size=(3, 3),
strides=(1, 1),
padding=((1, 1), (1, 1)),
dtype=self.dtype,
)
def __call__(self, sample, deterministic: bool = True):
# z to block_in
sample = self.conv_in(sample)
# middle
sample = self.mid_block(sample, deterministic=deterministic)
# upsampling
for block in self.up_blocks:
sample = block(sample, deterministic=deterministic)
sample = self.conv_norm_out(sample)
sample = nn.swish(sample)
sample = self.conv_out(sample)
return sample
class FlaxDiagonalGaussianDistribution(object):
def __init__(self, parameters, deterministic=False):
# Last axis to account for channels-last
self.mean, self.logvar = jnp.split(parameters, 2, axis=-1)
self.logvar = jnp.clip(self.logvar, -30.0, 20.0)
self.deterministic = deterministic
self.std = jnp.exp(0.5 * self.logvar)
self.var = jnp.exp(self.logvar)
if self.deterministic:
self.var = self.std = jnp.zeros_like(self.mean)
def sample(self, key):
return self.mean + self.std * jax.random.normal(key, self.mean.shape)
def kl(self, other=None):
if self.deterministic:
return jnp.array([0.0])
if other is None:
return 0.5 * jnp.sum(self.mean**2 + self.var - 1.0 - self.logvar, axis=[1, 2, 3])
return 0.5 * jnp.sum(
jnp.square(self.mean - other.mean) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar,
axis=[1, 2, 3],
)
def nll(self, sample, axis=[1, 2, 3]):
if self.deterministic:
return jnp.array([0.0])
logtwopi = jnp.log(2.0 * jnp.pi)
return 0.5 * jnp.sum(logtwopi + self.logvar + jnp.square(sample - self.mean) / self.var, axis=axis)
def mode(self):
return self.mean
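# Illustrative sketch (hypothetical helper): the last axis packs mean and
# logvar, so 8 parameter channels yield 4 latent channels.
def _flax_gaussian_demo():
    dist = FlaxDiagonalGaussianDistribution(jnp.zeros((1, 4, 4, 8)))
    z = dist.sample(jax.random.PRNGKey(0))
    assert z.shape == (1, 4, 4, 4)
    assert dist.kl().shape == (1,)  # one KL value per batch element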
@flax_register_to_config
class FlaxAutoencoderKL(nn.Module, FlaxModelMixin, ConfigMixin):
r"""
Flax Implementation of Variational Autoencoder (VAE) model with KL loss from the paper Auto-Encoding Variational
Bayes by Diederik P. Kingma and Max Welling.
This model is a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module)
subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to
general usage and behavior.
Finally, this model supports inherent JAX features such as:
- [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
- [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
- [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
- [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
Parameters:
in_channels (:obj:`int`, *optional*, defaults to 3):
Input channels
out_channels (:obj:`int`, *optional*, defaults to 3):
Output channels
down_block_types (:obj:`Tuple[str]`, *optional*, defaults to `(DownEncoderBlock2D)`):
DownEncoder block type
up_block_types (:obj:`Tuple[str]`, *optional*, defaults to `(UpDecoderBlock2D)`):
UpDecoder block type
        block_out_channels (:obj:`Tuple[int]`, *optional*, defaults to `(64,)`):
Tuple containing the number of output channels for each block
layers_per_block (:obj:`int`, *optional*, defaults to `2`):
Number of Resnet layer for each block
act_fn (:obj:`str`, *optional*, defaults to `silu`):
Activation function
latent_channels (:obj:`int`, *optional*, defaults to `4`):
Latent space channels
norm_num_groups (:obj:`int`, *optional*, defaults to `32`):
Norm num group
sample_size (:obj:`int`, *optional*, defaults to `32`):
Sample input size
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
parameters `dtype`
"""
in_channels: int = 3
out_channels: int = 3
down_block_types: Tuple[str] = ("DownEncoderBlock2D",)
up_block_types: Tuple[str] = ("UpDecoderBlock2D",)
block_out_channels: Tuple[int] = (64,)
layers_per_block: int = 1
act_fn: str = "silu"
latent_channels: int = 4
norm_num_groups: int = 32
sample_size: int = 32
dtype: jnp.dtype = jnp.float32
def setup(self):
self.encoder = FlaxEncoder(
in_channels=self.config.in_channels,
out_channels=self.config.latent_channels,
down_block_types=self.config.down_block_types,
block_out_channels=self.config.block_out_channels,
layers_per_block=self.config.layers_per_block,
act_fn=self.config.act_fn,
norm_num_groups=self.config.norm_num_groups,
double_z=True,
dtype=self.dtype,
)
self.decoder = FlaxDecoder(
in_channels=self.config.latent_channels,
out_channels=self.config.out_channels,
up_block_types=self.config.up_block_types,
block_out_channels=self.config.block_out_channels,
layers_per_block=self.config.layers_per_block,
norm_num_groups=self.config.norm_num_groups,
act_fn=self.config.act_fn,
dtype=self.dtype,
)
self.quant_conv = nn.Conv(
2 * self.config.latent_channels,
kernel_size=(1, 1),
strides=(1, 1),
padding="VALID",
dtype=self.dtype,
)
self.post_quant_conv = nn.Conv(
self.config.latent_channels,
kernel_size=(1, 1),
strides=(1, 1),
padding="VALID",
dtype=self.dtype,
)
def init_weights(self, rng: jax.random.PRNGKey) -> FrozenDict:
# init input tensors
sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
sample = jnp.zeros(sample_shape, dtype=jnp.float32)
params_rng, dropout_rng, gaussian_rng = jax.random.split(rng, 3)
rngs = {"params": params_rng, "dropout": dropout_rng, "gaussian": gaussian_rng}
return self.init(rngs, sample)["params"]
def encode(self, sample, deterministic: bool = True, return_dict: bool = True):
sample = jnp.transpose(sample, (0, 2, 3, 1))
hidden_states = self.encoder(sample, deterministic=deterministic)
moments = self.quant_conv(hidden_states)
posterior = FlaxDiagonalGaussianDistribution(moments)
if not return_dict:
return (posterior,)
return FlaxAutoencoderKLOutput(latent_dist=posterior)
def decode(self, latents, deterministic: bool = True, return_dict: bool = True):
if latents.shape[-1] != self.config.latent_channels:
latents = jnp.transpose(latents, (0, 2, 3, 1))
hidden_states = self.post_quant_conv(latents)
hidden_states = self.decoder(hidden_states, deterministic=deterministic)
hidden_states = jnp.transpose(hidden_states, (0, 3, 1, 2))
if not return_dict:
return (hidden_states,)
return FlaxDecoderOutput(sample=hidden_states)
def __call__(self, sample, sample_posterior=False, deterministic: bool = True, return_dict: bool = True):
        # always request dataclass outputs internally; `return_dict` only shapes the final return value
        posterior = self.encode(sample, deterministic=deterministic, return_dict=True)
if sample_posterior:
rng = self.make_rng("gaussian")
hidden_states = posterior.latent_dist.sample(rng)
else:
hidden_states = posterior.latent_dist.mode()
        sample = self.decode(hidden_states, deterministic=deterministic, return_dict=True).sample
if not return_dict:
return (sample,)
return FlaxDecoderOutput(sample=sample)
|
diffusers-main
|
src/diffusers/models/vae_flax.py
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..modeling_utils import ModelMixin
from ..utils import BaseOutput
from .unet_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
"""
Output of decoding method.
Args:
sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Decoded output sample of the model. Output of the last layer of the model.
"""
sample: torch.FloatTensor
@dataclass
class VQEncoderOutput(BaseOutput):
"""
Output of VQModel encoding method.
Args:
latents (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Encoded output sample of the model. Output of the last layer of the model.
"""
latents: torch.FloatTensor
@dataclass
class AutoencoderKLOutput(BaseOutput):
"""
Output of AutoencoderKL encoding method.
Args:
latent_dist (`DiagonalGaussianDistribution`):
Encoded outputs of `Encoder` represented as the mean and logvar of `DiagonalGaussianDistribution`.
`DiagonalGaussianDistribution` allows for sampling latents from the distribution.
"""
latent_dist: "DiagonalGaussianDistribution"
class Encoder(nn.Module):
def __init__(
self,
in_channels=3,
out_channels=3,
down_block_types=("DownEncoderBlock2D",),
block_out_channels=(64,),
layers_per_block=2,
norm_num_groups=32,
act_fn="silu",
double_z=True,
):
super().__init__()
self.layers_per_block = layers_per_block
self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)
self.mid_block = None
self.down_blocks = nn.ModuleList([])
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
down_block = get_down_block(
down_block_type,
num_layers=self.layers_per_block,
in_channels=input_channel,
out_channels=output_channel,
add_downsample=not is_final_block,
resnet_eps=1e-6,
downsample_padding=0,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
attn_num_head_channels=None,
temb_channels=None,
)
self.down_blocks.append(down_block)
# mid
self.mid_block = UNetMidBlock2D(
in_channels=block_out_channels[-1],
resnet_eps=1e-6,
resnet_act_fn=act_fn,
output_scale_factor=1,
resnet_time_scale_shift="default",
attn_num_head_channels=None,
resnet_groups=norm_num_groups,
temb_channels=None,
)
# out
self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
self.conv_act = nn.SiLU()
conv_out_channels = 2 * out_channels if double_z else out_channels
self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)
def forward(self, x):
sample = x
sample = self.conv_in(sample)
# down
for down_block in self.down_blocks:
sample = down_block(sample)
# middle
sample = self.mid_block(sample)
# post-process
sample = self.conv_norm_out(sample)
sample = self.conv_act(sample)
sample = self.conv_out(sample)
return sample
class Decoder(nn.Module):
def __init__(
self,
in_channels=3,
out_channels=3,
up_block_types=("UpDecoderBlock2D",),
block_out_channels=(64,),
layers_per_block=2,
norm_num_groups=32,
act_fn="silu",
):
super().__init__()
self.layers_per_block = layers_per_block
self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)
self.mid_block = None
self.up_blocks = nn.ModuleList([])
# mid
self.mid_block = UNetMidBlock2D(
in_channels=block_out_channels[-1],
resnet_eps=1e-6,
resnet_act_fn=act_fn,
output_scale_factor=1,
resnet_time_scale_shift="default",
attn_num_head_channels=None,
resnet_groups=norm_num_groups,
temb_channels=None,
)
# up
reversed_block_out_channels = list(reversed(block_out_channels))
output_channel = reversed_block_out_channels[0]
for i, up_block_type in enumerate(up_block_types):
prev_output_channel = output_channel
output_channel = reversed_block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
up_block = get_up_block(
up_block_type,
num_layers=self.layers_per_block + 1,
in_channels=prev_output_channel,
out_channels=output_channel,
prev_output_channel=None,
add_upsample=not is_final_block,
resnet_eps=1e-6,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
attn_num_head_channels=None,
temb_channels=None,
)
self.up_blocks.append(up_block)
prev_output_channel = output_channel
# out
self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
self.conv_act = nn.SiLU()
self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)
def forward(self, z):
sample = z
sample = self.conv_in(sample)
# middle
sample = self.mid_block(sample)
# up
for up_block in self.up_blocks:
sample = up_block(sample)
# post-process
sample = self.conv_norm_out(sample)
sample = self.conv_act(sample)
sample = self.conv_out(sample)
return sample
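# Illustrative sketch (hypothetical helper): a single-block Encoder/Decoder pair.
# With one block there is no down/upsampling, and `double_z=True` stacks mean
# and logvar, doubling the encoder's output channels.
def _vae_encoder_decoder_demo():
    enc = Encoder(in_channels=3, out_channels=4, block_out_channels=(32,), double_z=True)
    dec = Decoder(in_channels=4, out_channels=3, block_out_channels=(32,))
    x = torch.randn(1, 3, 16, 16)
    moments = enc(x)
    assert moments.shape == (1, 8, 16, 16)
    mean = torch.chunk(moments, 2, dim=1)[0]
    assert dec(mean).shape == x.shape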
class VectorQuantizer(nn.Module):
"""
Improved version over VectorQuantizer, can be used as a drop-in replacement. Mostly avoids costly matrix
multiplications and allows for post-hoc remapping of indices.
"""
# NOTE: due to a bug the beta term was applied to the wrong term. for
# backwards compatibility we use the buggy version by default, but you can
# specify legacy=False to fix it.
def __init__(self, n_e, e_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
super().__init__()
self.n_e = n_e
self.e_dim = e_dim
self.beta = beta
self.legacy = legacy
self.embedding = nn.Embedding(self.n_e, self.e_dim)
self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
self.remap = remap
if self.remap is not None:
self.register_buffer("used", torch.tensor(np.load(self.remap)))
self.re_embed = self.used.shape[0]
self.unknown_index = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
self.unknown_index = self.re_embed
self.re_embed = self.re_embed + 1
print(
f"Remapping {self.n_e} indices to {self.re_embed} indices. "
f"Using {self.unknown_index} for unknown indices."
)
else:
self.re_embed = n_e
self.sane_index_shape = sane_index_shape
def remap_to_used(self, inds):
ishape = inds.shape
assert len(ishape) > 1
inds = inds.reshape(ishape[0], -1)
used = self.used.to(inds)
match = (inds[:, :, None] == used[None, None, ...]).long()
new = match.argmax(-1)
unknown = match.sum(2) < 1
if self.unknown_index == "random":
new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
else:
new[unknown] = self.unknown_index
return new.reshape(ishape)
def unmap_to_all(self, inds):
ishape = inds.shape
assert len(ishape) > 1
inds = inds.reshape(ishape[0], -1)
used = self.used.to(inds)
if self.re_embed > self.used.shape[0]: # extra token
inds[inds >= self.used.shape[0]] = 0 # simply set to zero
back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
return back.reshape(ishape)
def forward(self, z):
# reshape z -> (batch, height, width, channel) and flatten
z = z.permute(0, 2, 3, 1).contiguous()
z_flattened = z.view(-1, self.e_dim)
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
d = (
torch.sum(z_flattened**2, dim=1, keepdim=True)
+ torch.sum(self.embedding.weight**2, dim=1)
- 2 * torch.einsum("bd,dn->bn", z_flattened, self.embedding.weight.t())
)
min_encoding_indices = torch.argmin(d, dim=1)
z_q = self.embedding(min_encoding_indices).view(z.shape)
perplexity = None
min_encodings = None
# compute loss for embedding
if not self.legacy:
loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
else:
loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)
# preserve gradients
z_q = z + (z_q - z).detach()
# reshape back to match original input shape
z_q = z_q.permute(0, 3, 1, 2).contiguous()
if self.remap is not None:
min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1) # add batch axis
min_encoding_indices = self.remap_to_used(min_encoding_indices)
min_encoding_indices = min_encoding_indices.reshape(-1, 1) # flatten
if self.sane_index_shape:
min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def get_codebook_entry(self, indices, shape):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
indices = indices.reshape(shape[0], -1) # add batch axis
indices = self.unmap_to_all(indices)
indices = indices.reshape(-1) # flatten again
# get quantized latent vectors
z_q = self.embedding(indices)
if shape is not None:
z_q = z_q.view(shape)
# reshape back to match original input shape
z_q = z_q.permute(0, 3, 1, 2).contiguous()
return z_q
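# --- Usage sketch (added for illustration, not part of the original file) ---
# A minimal example of the quantizer above; shapes are illustrative only.
#
#   quantizer = VectorQuantizer(n_e=256, e_dim=3, beta=0.25)
#   z = torch.randn(2, 3, 8, 8)                     # (batch, channel, height, width)
#   z_q, loss, (_, _, indices) = quantizer(z)
#   assert z_q.shape == z.shape                     # quantization preserves the input shape
#   # gradients reach `z` through the straight-through estimator: z + (z_q - z).detach()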
class DiagonalGaussianDistribution(object):
def __init__(self, parameters, deterministic=False):
self.parameters = parameters
self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
self.deterministic = deterministic
self.std = torch.exp(0.5 * self.logvar)
self.var = torch.exp(self.logvar)
if self.deterministic:
self.var = self.std = torch.zeros_like(
self.mean, device=self.parameters.device, dtype=self.parameters.dtype
)
def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
device = self.parameters.device
sample_device = "cpu" if device.type == "mps" else device
sample = torch.randn(self.mean.shape, generator=generator, device=sample_device)
# make sure sample is on the same device as the parameters and has same dtype
sample = sample.to(device=device, dtype=self.parameters.dtype)
x = self.mean + self.std * sample
return x
def kl(self, other=None):
if self.deterministic:
return torch.Tensor([0.0])
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean, 2) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar,
dim=[1, 2, 3],
)
def nll(self, sample, dims=[1, 2, 3]):
if self.deterministic:
return torch.Tensor([0.0])
logtwopi = np.log(2.0 * np.pi)
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)
def mode(self):
return self.mean
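# --- Usage sketch (added for illustration, not part of the original file) ---
# The distribution is parameterized by mean and log-variance stacked on the
# channel axis; sampling uses the reparameterization trick (mean + std * eps).
#
#   params = torch.randn(2, 8, 4, 4)                # 4 mean channels + 4 logvar channels
#   dist = DiagonalGaussianDistribution(params)
#   z = dist.sample()                               # differentiable draw, shape (2, 4, 4, 4)
#   kl = dist.kl()                                  # KL vs. a standard normal, shape (2,)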
class VQModel(ModelMixin, ConfigMixin):
r"""VQ-VAE model from the paper Neural Discrete Representation Learning by Aaron van den Oord, Oriol Vinyals and Koray
Kavukcuoglu.
This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library
implements for all models (such as downloading or saving).
Parameters:
in_channels (int, *optional*, defaults to 3): Number of channels in the input image.
out_channels (int, *optional*, defaults to 3): Number of channels in the output.
down_block_types (`Tuple[str]`, *optional*, defaults to `("DownEncoderBlock2D",)`): Tuple of downsample block types.
up_block_types (`Tuple[str]`, *optional*, defaults to `("UpDecoderBlock2D",)`): Tuple of upsample block types.
block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`): Tuple of block output channels.
act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
latent_channels (`int`, *optional*, defaults to `3`): Number of channels in the latent space.
sample_size (`int`, *optional*, defaults to `32`): TODO
num_vq_embeddings (`int`, *optional*, defaults to `256`): Number of codebook vectors in the VQ-VAE.
"""
@register_to_config
def __init__(
self,
in_channels: int = 3,
out_channels: int = 3,
down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
block_out_channels: Tuple[int] = (64,),
layers_per_block: int = 1,
act_fn: str = "silu",
latent_channels: int = 3,
sample_size: int = 32,
num_vq_embeddings: int = 256,
norm_num_groups: int = 32,
):
super().__init__()
# pass init params to Encoder
self.encoder = Encoder(
in_channels=in_channels,
out_channels=latent_channels,
down_block_types=down_block_types,
block_out_channels=block_out_channels,
layers_per_block=layers_per_block,
act_fn=act_fn,
norm_num_groups=norm_num_groups,
double_z=False,
)
self.quant_conv = torch.nn.Conv2d(latent_channels, latent_channels, 1)
self.quantize = VectorQuantizer(
num_vq_embeddings, latent_channels, beta=0.25, remap=None, sane_index_shape=False
)
self.post_quant_conv = torch.nn.Conv2d(latent_channels, latent_channels, 1)
# pass init params to Decoder
self.decoder = Decoder(
in_channels=latent_channels,
out_channels=out_channels,
up_block_types=up_block_types,
block_out_channels=block_out_channels,
layers_per_block=layers_per_block,
act_fn=act_fn,
norm_num_groups=norm_num_groups,
)
def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
h = self.encoder(x)
h = self.quant_conv(h)
if not return_dict:
return (h,)
return VQEncoderOutput(latents=h)
def decode(
self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
) -> Union[DecoderOutput, torch.FloatTensor]:
# also go through quantization layer
if not force_not_quantize:
quant, emb_loss, info = self.quantize(h)
else:
quant = h
quant = self.post_quant_conv(quant)
dec = self.decoder(quant)
if not return_dict:
return (dec,)
return DecoderOutput(sample=dec)
def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
r"""
Args:
sample (`torch.FloatTensor`): Input sample.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
"""
x = sample
h = self.encode(x).latents
dec = self.decode(h).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=dec)
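# --- Usage sketch (added for illustration, not part of the original file) ---
# A round trip through the tiny default config; real checkpoints would instead
# be loaded with VQModel.from_pretrained(...).
#
#   model = VQModel()
#   image = torch.randn(1, 3, 32, 32)
#   latents = model.encode(image).latents           # continuous latents, pre-quantization
#   recon = model.decode(latents).sample            # quantize, then decode back to pixels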
class AutoencoderKL(ModelMixin, ConfigMixin):
r"""Variational Autoencoder (VAE) model with KL loss from the paper Auto-Encoding Variational Bayes by Diederik P. Kingma
and Max Welling.
This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library
implements for all models (such as downloading or saving).
Parameters:
in_channels (int, *optional*, defaults to 3): Number of channels in the input image.
out_channels (int, *optional*, defaults to 3): Number of channels in the output.
down_block_types (`Tuple[str]`, *optional*, defaults to `("DownEncoderBlock2D",)`): Tuple of downsample block types.
up_block_types (`Tuple[str]`, *optional*, defaults to `("UpDecoderBlock2D",)`): Tuple of upsample block types.
block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`): Tuple of block output channels.
act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
latent_channels (`int`, *optional*, defaults to `4`): Number of channels in the latent space.
sample_size (`int`, *optional*, defaults to `32`): TODO
"""
@register_to_config
def __init__(
self,
in_channels: int = 3,
out_channels: int = 3,
down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
block_out_channels: Tuple[int] = (64,),
layers_per_block: int = 1,
act_fn: str = "silu",
latent_channels: int = 4,
norm_num_groups: int = 32,
sample_size: int = 32,
):
super().__init__()
# pass init params to Encoder
self.encoder = Encoder(
in_channels=in_channels,
out_channels=latent_channels,
down_block_types=down_block_types,
block_out_channels=block_out_channels,
layers_per_block=layers_per_block,
act_fn=act_fn,
norm_num_groups=norm_num_groups,
double_z=True,
)
# pass init params to Decoder
self.decoder = Decoder(
in_channels=latent_channels,
out_channels=out_channels,
up_block_types=up_block_types,
block_out_channels=block_out_channels,
layers_per_block=layers_per_block,
norm_num_groups=norm_num_groups,
act_fn=act_fn,
)
self.quant_conv = torch.nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
self.post_quant_conv = torch.nn.Conv2d(latent_channels, latent_channels, 1)
def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
h = self.encoder(x)
moments = self.quant_conv(h)
posterior = DiagonalGaussianDistribution(moments)
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=posterior)
def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
z = self.post_quant_conv(z)
dec = self.decoder(z)
if not return_dict:
return (dec,)
return DecoderOutput(sample=dec)
def forward(
self,
sample: torch.FloatTensor,
sample_posterior: bool = False,
return_dict: bool = True,
generator: Optional[torch.Generator] = None,
) -> Union[DecoderOutput, torch.FloatTensor]:
r"""
Args:
sample (`torch.FloatTensor`): Input sample.
sample_posterior (`bool`, *optional*, defaults to `False`):
Whether to sample from the posterior.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
"""
x = sample
posterior = self.encode(x).latent_dist
if sample_posterior:
z = posterior.sample(generator=generator)
else:
z = posterior.mode()
dec = self.decode(z).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=dec)
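# --- Usage sketch (added for illustration, not part of the original file) ---
# Encoding returns a DiagonalGaussianDistribution; pick a stochastic sample or
# the deterministic mode before decoding. The config here is a tiny default.
#
#   vae = AutoencoderKL()
#   image = torch.randn(1, 3, 32, 32)
#   posterior = vae.encode(image).latent_dist
#   z = posterior.sample()                          # or posterior.mode()
#   recon = vae.decode(z).sample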
|
diffusers-main
|
src/diffusers/models/vae.py
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_torch_available, is_transformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
return EnvironmentCommand()
class EnvironmentCommand(BaseDiffusersCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
download_parser = parser.add_parser("env")
download_parser.set_defaults(func=info_command_factory)
def run(self):
hub_version = huggingface_hub.__version__
pt_version = "not installed"
pt_cuda_available = "NA"
if is_torch_available():
import torch
pt_version = torch.__version__
pt_cuda_available = torch.cuda.is_available()
transformers_version = "not installed"
if is_transformers_available():
import transformers
transformers_version = transformers.__version__
info = {
"`diffusers` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
"Huggingface_hub version": hub_version,
"Transformers version": transformers_version,
"Using GPU in script?": "<fill in>",
"Using distributed or parallel set-up in script?": "<fill in>",
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
print(self.format_dict(info))
return info
@staticmethod
def format_dict(d):
return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
|
diffusers-main
|
src/diffusers/commands/env.py
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseDiffusersCLICommand(ABC):
@staticmethod
@abstractmethod
def register_subcommand(parser: ArgumentParser):
raise NotImplementedError()
@abstractmethod
def run(self):
raise NotImplementedError()
|
diffusers-main
|
src/diffusers/commands/__init__.py
|
#!/usr/bin/env python
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")
# Register commands
EnvironmentCommand.register_subcommand(commands_parser)
# Let's go
args = parser.parse_args()
if not hasattr(args, "func"):
parser.print_help()
exit(1)
# Run
service = args.func(args)
service.run()
if __name__ == "__main__":
main()
|
diffusers-main
|
src/diffusers/commands/diffusers_cli.py
|
# Copyright 2022 NVIDIA and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
# setable values
num_inference_steps: Optional[int] = None
timesteps: Optional[jnp.ndarray] = None
schedule: Optional[jnp.ndarray] = None # sigma(t_i)
@classmethod
def create(cls):
return cls()
@dataclass
class FlaxKarrasVeOutput(BaseOutput):
"""
Output class for the scheduler's step function output.
Args:
prev_sample (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
derivative (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)` for images):
Derivative of predicted original image sample (x_0).
state (`KarrasVeSchedulerState`): the `FlaxKarrasVeScheduler` state data class.
"""
prev_sample: jnp.ndarray
derivative: jnp.ndarray
state: KarrasVeSchedulerState
class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
"""
Stochastic sampling from Karras et al. [1] tailored to the Variance-Expanding (VE) models [2]. Use Algorithm 2 and
the VE column of Table 1 from [1] for reference.
[1] Karras, Tero, et al. "Elucidating the Design Space of Diffusion-Based Generative Models."
https://arxiv.org/abs/2206.00364 [2] Song, Yang, et al. "Score-based generative modeling through stochastic
differential equations." https://arxiv.org/abs/2011.13456
[`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
[`~ConfigMixin`] also provides general loading and saving functionality via the [`~ConfigMixin.save_config`] and
[`~ConfigMixin.from_config`] functions.
For more details on the parameters, see the original paper's Appendix E.: "Elucidating the Design Space of
Diffusion-Based Generative Models." https://arxiv.org/abs/2206.00364. The grid search values used to find the
optimal {s_noise, s_churn, s_min, s_max} for a specific model are described in Table 5 of the paper.
Args:
sigma_min (`float`): minimum noise magnitude
sigma_max (`float`): maximum noise magnitude
s_noise (`float`): the amount of additional noise to counteract loss of detail during sampling.
A reasonable range is [1.000, 1.011].
s_churn (`float`): the parameter controlling the overall amount of stochasticity.
A reasonable range is [0, 100].
s_min (`float`): the start value of the sigma range where we add noise (enable stochasticity).
A reasonable range is [0, 10].
s_max (`float`): the end value of the sigma range where we add noise.
A reasonable range is [0.2, 80].
"""
@register_to_config
def __init__(
self,
sigma_min: float = 0.02,
sigma_max: float = 100,
s_noise: float = 1.007,
s_churn: float = 80,
s_min: float = 0.05,
s_max: float = 50,
):
self.state = KarrasVeSchedulerState.create()
def set_timesteps(
self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple
) -> KarrasVeSchedulerState:
"""
Sets the continuous timesteps used for the diffusion chain. Supporting function to be run before inference.
Args:
state (`KarrasVeSchedulerState`):
the `FlaxKarrasVeScheduler` state data class.
num_inference_steps (`int`):
the number of diffusion steps used when generating samples with a pre-trained model.
"""
timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
schedule = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=num_inference_steps,
schedule=jnp.array(schedule, dtype=jnp.float32),
timesteps=timesteps,
)
def add_noise_to_input(
self,
state: KarrasVeSchedulerState,
sample: jnp.ndarray,
sigma: float,
key: random.KeyArray,
) -> Tuple[jnp.ndarray, float]:
"""
Explicit Langevin-like "churn" step of adding noise to the sample according to a factor gamma_i ≥ 0 to reach a
higher noise level sigma_hat = sigma_i + gamma_i*sigma_i.
TODO Args:
"""
if self.config.s_min <= sigma <= self.config.s_max:
gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
else:
gamma = 0
# sample eps ~ N(0, S_noise^2 * I)
key = random.split(key, num=1)[0]  # take the single derived key; random.normal expects an unbatched PRNG key
eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
sigma_hat = sigma + gamma * sigma
sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def step(
self,
state: KarrasVeSchedulerState,
model_output: jnp.ndarray,
sigma_hat: float,
sigma_prev: float,
sample_hat: jnp.ndarray,
return_dict: bool = True,
) -> Union[FlaxKarrasVeOutput, Tuple]:
"""
Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
process from the learned model outputs (most often the predicted noise).
Args:
state (`KarrasVeSchedulerState`): the `FlaxKarrasVeScheduler` state data class.
model_output (`jnp.ndarray`): direct output from learned diffusion model.
sigma_hat (`float`): TODO
sigma_prev (`float`): TODO
sample_hat (`jnp.ndarray`): TODO
return_dict (`bool`): option for returning tuple rather than FlaxKarrasVeOutput class
Returns:
[`~schedulers.scheduling_karras_ve_flax.FlaxKarrasVeOutput`] or `tuple`: Updated sample in the diffusion
chain and derivative. [`~schedulers.scheduling_karras_ve_flax.FlaxKarrasVeOutput`] if `return_dict` is
True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor.
"""
pred_original_sample = sample_hat + sigma_hat * model_output
derivative = (sample_hat - pred_original_sample) / sigma_hat
sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)
def step_correct(
self,
state: KarrasVeSchedulerState,
model_output: jnp.ndarray,
sigma_hat: float,
sigma_prev: float,
sample_hat: jnp.ndarray,
sample_prev: jnp.ndarray,
derivative: jnp.ndarray,
return_dict: bool = True,
) -> Union[FlaxKarrasVeOutput, Tuple]:
"""
Correct the predicted sample based on the output model_output of the network. TODO complete description
Args:
state (`KarrasVeSchedulerState`): the `FlaxKarrasVeScheduler` state data class.
model_output (`jnp.ndarray`): direct output from learned diffusion model.
sigma_hat (`float`): TODO
sigma_prev (`float`): TODO
sample_hat (`jnp.ndarray`): TODO
sample_prev (`jnp.ndarray`): TODO
derivative (`jnp.ndarray`): TODO
return_dict (`bool`): option for returning tuple rather than FlaxKarrasVeOutput class
Returns:
prev_sample (TODO): updated sample in the diffusion chain. derivative (TODO): TODO
"""
pred_original_sample = sample_prev + sigma_prev * model_output
derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)
def add_noise(self, original_samples, noise, timesteps):
raise NotImplementedError()
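# --- Usage sketch (added for illustration, not part of the original file) ---
# One "churn" step of Algorithm 2: raise the noise level with
# add_noise_to_input, then take the Euler step via `step`. Shapes and the step
# count are illustrative only.
#
#   scheduler = FlaxKarrasVeScheduler()
#   state = scheduler.set_timesteps(scheduler.state, num_inference_steps=50, shape=(1, 3, 32, 32))
#   key = random.PRNGKey(0)
#   sample = jnp.zeros((1, 3, 32, 32))
#   sigma = float(state.schedule[0])
#   sample_hat, sigma_hat = scheduler.add_noise_to_input(state, sample, sigma, key)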
|
diffusers-main
|
src/diffusers/schedulers/scheduling_karras_ve_flax.py
|
# Copyright 2022 UC Berkeley Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils import SchedulerMixin
@dataclass
class DDPMSchedulerOutput(BaseOutput):
"""
Output class for the scheduler's step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999):
"""
Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
(1-beta) over time from t = [0,1].
Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
to that part of the diffusion process.
Args:
num_diffusion_timesteps (`int`): the number of betas to produce.
max_beta (`float`): the maximum beta to use; use values lower than 1 to
prevent singularities.
Returns:
betas (`torch.Tensor`): the betas used by the scheduler to step the model outputs
"""
def alpha_bar(time_step):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2
betas = []
for i in range(num_diffusion_timesteps):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
return torch.tensor(betas, dtype=torch.float32)
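# --- Usage sketch (added for illustration, not part of the original file) ---
# A quick sanity check of the squared-cosine schedule above: alpha_bar is
# decreasing, so every beta lies in (0, max_beta].
#
#   betas = betas_for_alpha_bar(10)
#   assert betas.shape == (10,)
#   assert bool((betas > 0).all()) and bool((betas <= 0.999).all())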
class DDPMScheduler(SchedulerMixin, ConfigMixin):
"""
Denoising diffusion probabilistic models (DDPMs) explore the connections between denoising score matching and
Langevin dynamics sampling.
[`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
[`~ConfigMixin`] also provides general loading and saving functionality via the [`~ConfigMixin.save_config`] and
[`~ConfigMixin.from_config`] functions.
For more details, see the original paper: https://arxiv.org/abs/2006.11239
Args:
num_train_timesteps (`int`): number of diffusion steps used to train the model.
beta_start (`float`): the starting `beta` value of inference.
beta_end (`float`): the final `beta` value.
beta_schedule (`str`):
the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
`linear`, `scaled_linear`, or `squaredcos_cap_v2`.
trained_betas (`np.ndarray`, optional):
option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.
variance_type (`str`):
options to clip the variance used when adding noise to the denoised sample. Choose from `fixed_small`,
`fixed_small_log`, `fixed_large`, `fixed_large_log`, `learned` or `learned_range`.
clip_sample (`bool`, default `True`):
option to clip predicted sample between -1 and 1 for numerical stability.
"""
@register_to_config
def __init__(
self,
num_train_timesteps: int = 1000,
beta_start: float = 0.0001,
beta_end: float = 0.02,
beta_schedule: str = "linear",
trained_betas: Optional[np.ndarray] = None,
variance_type: str = "fixed_small",
clip_sample: bool = True,
):
if trained_betas is not None:
self.betas = torch.from_numpy(trained_betas)
elif beta_schedule == "linear":
self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
self.betas = (
torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
self.betas = betas_for_alpha_bar(num_train_timesteps)
elif beta_schedule == "sigmoid":
# GeoDiff sigmoid schedule
betas = torch.linspace(-6, 6, num_train_timesteps)
self.betas = torch.sigmoid(betas) * (beta_end - beta_start) + beta_start
else:
raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
self.alphas = 1.0 - self.betas
self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
self.one = torch.tensor(1.0)
# standard deviation of the initial noise distribution
self.init_noise_sigma = 1.0
# setable values
self.num_inference_steps = None
self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())
self.variance_type = variance_type
def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
"""
Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
current timestep.
Args:
sample (`torch.FloatTensor`): input sample
timestep (`int`, optional): current timestep
Returns:
`torch.FloatTensor`: scaled input sample
"""
return sample
def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
"""
Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference.
Args:
num_inference_steps (`int`):
the number of diffusion steps used when generating samples with a pre-trained model.
"""
num_inference_steps = min(self.config.num_train_timesteps, num_inference_steps)
self.num_inference_steps = num_inference_steps
timesteps = np.arange(
0, self.config.num_train_timesteps, self.config.num_train_timesteps // self.num_inference_steps
)[::-1].copy()
self.timesteps = torch.from_numpy(timesteps).to(device)
def _get_variance(self, t, predicted_variance=None, variance_type=None):
alpha_prod_t = self.alphas_cumprod[t]
alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * self.betas[t]
if variance_type is None:
variance_type = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
variance = torch.clamp(variance, min=1e-20)
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
variance = torch.log(torch.clamp(variance, min=1e-20))
elif variance_type == "fixed_large":
variance = self.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
variance = torch.log(self.betas[t])
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
min_log = variance
max_log = self.betas[t]
frac = (predicted_variance + 1) / 2
variance = frac * max_log + (1 - frac) * min_log
return variance
def step(
self,
model_output: torch.FloatTensor,
timestep: int,
sample: torch.FloatTensor,
predict_epsilon=True,
generator=None,
return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
"""
Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
process from the learned model outputs (most often the predicted noise).
Args:
model_output (`torch.FloatTensor`): direct output from learned diffusion model.
timestep (`int`): current discrete timestep in the diffusion chain.
sample (`torch.FloatTensor`):
current instance of sample being created by diffusion process.
predict_epsilon (`bool`):
if `True` (the default), `model_output` is treated as the predicted noise (epsilon); set to `False` when the model predicts the denoised sample x_0 directly.
generator: random number generator.
return_dict (`bool`): option for returning tuple rather than DDPMSchedulerOutput class
Returns:
[`~schedulers.scheduling_utils.DDPMSchedulerOutput`] or `tuple`:
[`~schedulers.scheduling_utils.DDPMSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When
returning a tuple, the first element is the sample tensor.
"""
t = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
else:
predicted_variance = None
# 1. compute alphas, betas
alpha_prod_t = self.alphas_cumprod[t]
alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
beta_prod_t = 1 - alpha_prod_t
beta_prod_t_prev = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if predict_epsilon:
pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
else:
pred_original_sample = model_output
# 3. Clip "predicted x_0"
if self.config.clip_sample:
pred_original_sample = torch.clamp(pred_original_sample, -1, 1)
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * self.betas[t]) / beta_prod_t
current_sample_coeff = self.alphas[t] ** (0.5) * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
variance = 0
if t > 0:
noise = torch.randn(
model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
).to(model_output.device)
variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise
pred_prev_sample = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
def add_noise(
self,
original_samples: torch.FloatTensor,
noise: torch.FloatTensor,
timesteps: torch.IntTensor,
) -> torch.FloatTensor:
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
timesteps = timesteps.to(original_samples.device)
sqrt_alpha_prod = self.alphas_cumprod[timesteps] ** 0.5
sqrt_alpha_prod = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
sqrt_one_minus_alpha_prod = (1 - self.alphas_cumprod[timesteps]) ** 0.5
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def __len__(self):
return self.config.num_train_timesteps
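# --- Usage sketch (added for illustration, not part of the original file) ---
# The classic ancestral-sampling loop; `unet` stands in for a hypothetical
# epsilon-predicting model and is not defined in this file.
#
#   scheduler = DDPMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 32, 32)              # start from pure noise
#   for t in scheduler.timesteps:
#       noise_pred = unet(sample, t)                # hypothetical model call
#       sample = scheduler.step(noise_pred, t, sample).prev_sample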
|
diffusers-main
|
src/diffusers/schedulers/scheduling_ddpm.py
|
# Copyright 2022 Zhejiang University Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import FlaxSchedulerMixin, FlaxSchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps: int, max_beta=0.999) -> jnp.ndarray:
"""
Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
(1-beta) over time from t = [0,1].
Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
to that part of the diffusion process.
Args:
num_diffusion_timesteps (`int`): the number of betas to produce.
max_beta (`float`): the maximum beta to use; use values lower than 1 to
prevent singularities.
Returns:
betas (`jnp.ndarray`): the betas used by the scheduler to step the model outputs
"""
def alpha_bar(time_step):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2
betas = []
for i in range(num_diffusion_timesteps):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
return jnp.array(betas, dtype=jnp.float32)
@flax.struct.dataclass
class PNDMSchedulerState:
# setable values
_timesteps: jnp.ndarray
num_inference_steps: Optional[int] = None
prk_timesteps: Optional[jnp.ndarray] = None
plms_timesteps: Optional[jnp.ndarray] = None
timesteps: Optional[jnp.ndarray] = None
# running values
cur_model_output: Optional[jnp.ndarray] = None
counter: int = 0
cur_sample: Optional[jnp.ndarray] = None
ets: jnp.ndarray = jnp.array([])
@classmethod
def create(cls, num_train_timesteps: int):
return cls(_timesteps=jnp.arange(0, num_train_timesteps)[::-1])
@dataclass
class FlaxPNDMSchedulerOutput(FlaxSchedulerOutput):
state: PNDMSchedulerState
class FlaxPNDMScheduler(FlaxSchedulerMixin, ConfigMixin):
"""
Pseudo numerical methods for diffusion models (PNDM) proposes using more advanced ODE integration techniques,
namely the Runge-Kutta method and a linear multi-step method.
[`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
[`~ConfigMixin`] also provides general loading and saving functionality via the [`~ConfigMixin.save_config`] and
[`~ConfigMixin.from_config`] functions.
For more details, see the original paper: https://arxiv.org/abs/2202.09778
Args:
num_train_timesteps (`int`): number of diffusion steps used to train the model.
beta_start (`float`): the starting `beta` value of inference.
beta_end (`float`): the final `beta` value.
beta_schedule (`str`):
the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
`linear`, `scaled_linear`, or `squaredcos_cap_v2`.
trained_betas (`jnp.ndarray`, optional):
option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.
skip_prk_steps (`bool`):
allows the scheduler to skip the Runge-Kutta steps that are defined in the original paper as being required
before plms steps; defaults to `False`.
set_alpha_to_one (`bool`, default `False`):
each diffusion step uses the value of alphas product at that step and at the previous one. For the final
step there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,
otherwise it uses the value of alpha at step 0.
steps_offset (`int`, default `0`):
an offset added to the inference steps. You can use a combination of `offset=1` and
`set_alpha_to_one=False`, to make the last step use step 0 for the previous alpha product, as done in
stable diffusion.
"""
@property
def has_state(self):
return True
@register_to_config
def __init__(
self,
num_train_timesteps: int = 1000,
beta_start: float = 0.0001,
beta_end: float = 0.02,
beta_schedule: str = "linear",
trained_betas: Optional[jnp.ndarray] = None,
skip_prk_steps: bool = False,
set_alpha_to_one: bool = False,
steps_offset: int = 0,
):
if trained_betas is not None:
self.betas = jnp.asarray(trained_betas)
elif beta_schedule == "linear":
self.betas = jnp.linspace(beta_start, beta_end, num_train_timesteps, dtype=jnp.float32)
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
self.betas = jnp.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=jnp.float32) ** 2
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
self.betas = betas_for_alpha_bar(num_train_timesteps)
else:
raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
self.alphas = 1.0 - self.betas
self.alphas_cumprod = jnp.cumprod(self.alphas, axis=0)
self.final_alpha_cumprod = jnp.array(1.0) if set_alpha_to_one else self.alphas_cumprod[0]
# For now we only support F-PNDM, i.e. the Runge-Kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
self.pndm_order = 4
# standard deviation of the initial noise distribution
self.init_noise_sigma = 1.0
def create_state(self):
return PNDMSchedulerState.create(num_train_timesteps=self.config.num_train_timesteps)
def set_timesteps(self, state: PNDMSchedulerState, num_inference_steps: int, shape: Tuple) -> PNDMSchedulerState:
"""
Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference.
Args:
state (`PNDMSchedulerState`):
the `FlaxPNDMScheduler` state data class instance.
num_inference_steps (`int`):
the number of diffusion steps used when generating samples with a pre-trained model.
"""
offset = self.config.steps_offset
step_ratio = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_steps is a power of 3
_timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round() + offset
state = state.replace(num_inference_steps=num_inference_steps, _timesteps=_timesteps)
if self.config.skip_prk_steps:
# for some models like stable diffusion the prk steps can/should be skipped to
# produce better results. When using PNDM with `self.config.skip_prk_steps` the implementation
# is based on crowsonkb's PLMS sampler implementation: https://github.com/CompVis/latent-diffusion/pull/51
state = state.replace(
prk_timesteps=jnp.array([]),
plms_timesteps=jnp.concatenate(
[state._timesteps[:-1], state._timesteps[-2:-1], state._timesteps[-1:]]
)[::-1],
)
else:
prk_timesteps = jnp.array(state._timesteps[-self.pndm_order :]).repeat(2) + jnp.tile(
jnp.array([0, self.config.num_train_timesteps // num_inference_steps // 2]), self.pndm_order
)
state = state.replace(
prk_timesteps=(prk_timesteps[:-1].repeat(2)[1:-1])[::-1],
plms_timesteps=state._timesteps[:-3][::-1],
)
return state.replace(
timesteps=jnp.concatenate([state.prk_timesteps, state.plms_timesteps]).astype(jnp.int32),
counter=0,
# Reserve space for the state variables
cur_model_output=jnp.zeros(shape),
cur_sample=jnp.zeros(shape),
ets=jnp.zeros((4,) + shape),
)
def scale_model_input(
self, state: PNDMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
) -> jnp.ndarray:
"""
Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
current timestep.
Args:
state (`PNDMSchedulerState`): the `FlaxPNDMScheduler` state data class instance.
sample (`jnp.ndarray`): input sample
timestep (`int`, optional): current timestep
Returns:
`jnp.ndarray`: scaled input sample
"""
return sample
def step(
self,
state: PNDMSchedulerState,
model_output: jnp.ndarray,
timestep: int,
sample: jnp.ndarray,
return_dict: bool = True,
) -> Union[FlaxPNDMSchedulerOutput, Tuple]:
"""
Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
process from the learned model outputs (most often the predicted noise).
This function calls `step_prk()` or `step_plms()` depending on the internal variable `counter`.
Args:
state (`PNDMSchedulerState`): the `FlaxPNDMScheduler` state data class instance.
model_output (`jnp.ndarray`): direct output from learned diffusion model.
timestep (`int`): current discrete timestep in the diffusion chain.
sample (`jnp.ndarray`):
current instance of sample being created by diffusion process.
return_dict (`bool`): option for returning tuple rather than FlaxPNDMSchedulerOutput class
Returns:
[`FlaxPNDMSchedulerOutput`] or `tuple`: [`FlaxPNDMSchedulerOutput`] if `return_dict` is True, otherwise a
`tuple`. When returning a tuple, the first element is the sample tensor.
"""
if self.config.skip_prk_steps:
prev_sample, state = self.step_plms(
state=state, model_output=model_output, timestep=timestep, sample=sample
)
else:
prev_sample, state = jax.lax.switch(
jnp.where(state.counter < len(state.prk_timesteps), 0, 1),
(self.step_prk, self.step_plms),
# Args to either branch
state,
model_output,
timestep,
sample,
)
if not return_dict:
return (prev_sample, state)
return FlaxPNDMSchedulerOutput(prev_sample=prev_sample, state=state)
def step_prk(
self,
state: PNDMSchedulerState,
model_output: jnp.ndarray,
timestep: int,
sample: jnp.ndarray,
) -> Union[FlaxPNDMSchedulerOutput, Tuple]:
"""
Step function propagating the sample with the Runge-Kutta method. RK takes 4 forward passes to approximate the
solution to the differential equation.
Args:
state (`PNDMSchedulerState`): the `FlaxPNDMScheduler` state data class instance.
model_output (`jnp.ndarray`): direct output from learned diffusion model.
timestep (`int`): current discrete timestep in the diffusion chain.
sample (`jnp.ndarray`):
current instance of sample being created by diffusion process.
Returns:
`tuple`: a `(prev_sample, state)` pair, where `prev_sample` is the updated sample tensor.
"""
if state.num_inference_steps is None:
raise ValueError(
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
)
diff_to_prev = jnp.where(
state.counter % 2, 0, self.config.num_train_timesteps // state.num_inference_steps // 2
)
prev_timestep = timestep - diff_to_prev
timestep = state.prk_timesteps[state.counter // 4 * 4]
def remainder_0(state: PNDMSchedulerState, model_output: jnp.ndarray, ets_at: int):
return (
state.replace(
cur_model_output=state.cur_model_output + 1 / 6 * model_output,
ets=state.ets.at[ets_at].set(model_output),
cur_sample=sample,
),
model_output,
)
def remainder_1(state: PNDMSchedulerState, model_output: jnp.ndarray, ets_at: int):
return state.replace(cur_model_output=state.cur_model_output + 1 / 3 * model_output), model_output
def remainder_2(state: PNDMSchedulerState, model_output: jnp.ndarray, ets_at: int):
return state.replace(cur_model_output=state.cur_model_output + 1 / 3 * model_output), model_output
def remainder_3(state: PNDMSchedulerState, model_output: jnp.ndarray, ets_at: int):
model_output = state.cur_model_output + 1 / 6 * model_output
return state.replace(cur_model_output=jnp.zeros_like(state.cur_model_output)), model_output
state, model_output = jax.lax.switch(
state.counter % 4,
(remainder_0, remainder_1, remainder_2, remainder_3),
# Args to either branch
state,
model_output,
state.counter // 4,
)
cur_sample = state.cur_sample
prev_sample = self._get_prev_sample(cur_sample, timestep, prev_timestep, model_output)
state = state.replace(counter=state.counter + 1)
return (prev_sample, state)
def step_plms(
self,
state: PNDMSchedulerState,
model_output: jnp.ndarray,
timestep: int,
sample: jnp.ndarray,
) -> Union[FlaxPNDMSchedulerOutput, Tuple]:
"""
Step function propagating the sample with the linear multi-step method. This makes a single forward pass and
reuses previously stored model outputs to approximate the solution.
Args:
state (`PNDMSchedulerState`): the `FlaxPNDMScheduler` state data class instance.
model_output (`jnp.ndarray`): direct output from learned diffusion model.
timestep (`int`): current discrete timestep in the diffusion chain.
sample (`jnp.ndarray`):
current instance of sample being created by diffusion process.
Returns:
`tuple`: a `(prev_sample, state)` pair, where `prev_sample` is the updated sample tensor.
"""
if state.num_inference_steps is None:
raise ValueError(
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
)
if not self.config.skip_prk_steps and len(state.ets) < 3:
raise ValueError(
f"{self.__class__} can only be run AFTER scheduler has been run "
"in 'prk' mode for at least 12 iterations "
"See: https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/pipeline_pndm.py "
"for more information."
)
prev_timestep = timestep - self.config.num_train_timesteps // state.num_inference_steps
prev_timestep = jnp.where(prev_timestep > 0, prev_timestep, 0)
# Reference:
# if state.counter != 1:
# state.ets.append(model_output)
# else:
# prev_timestep = timestep
# timestep = timestep + self.config.num_train_timesteps // state.num_inference_steps
prev_timestep = jnp.where(state.counter == 1, timestep, prev_timestep)
timestep = jnp.where(
state.counter == 1, timestep + self.config.num_train_timesteps // state.num_inference_steps, timestep
)
# Reference:
# if len(state.ets) == 1 and state.counter == 0:
# model_output = model_output
# state.cur_sample = sample
# elif len(state.ets) == 1 and state.counter == 1:
# model_output = (model_output + state.ets[-1]) / 2
# sample = state.cur_sample
# state.cur_sample = None
# elif len(state.ets) == 2:
# model_output = (3 * state.ets[-1] - state.ets[-2]) / 2
# elif len(state.ets) == 3:
# model_output = (23 * state.ets[-1] - 16 * state.ets[-2] + 5 * state.ets[-3]) / 12
# else:
# model_output = (1 / 24) * (55 * state.ets[-1] - 59 * state.ets[-2] + 37 * state.ets[-3] - 9 * state.ets[-4])
def counter_0(state: PNDMSchedulerState):
ets = state.ets.at[0].set(model_output)
return state.replace(
ets=ets,
cur_sample=sample,
cur_model_output=jnp.array(model_output, dtype=jnp.float32),
)
def counter_1(state: PNDMSchedulerState):
return state.replace(
cur_model_output=(model_output + state.ets[0]) / 2,
)
def counter_2(state: PNDMSchedulerState):
ets = state.ets.at[1].set(model_output)
return state.replace(
ets=ets,
cur_model_output=(3 * ets[1] - ets[0]) / 2,
cur_sample=sample,
)
def counter_3(state: PNDMSchedulerState):
ets = state.ets.at[2].set(model_output)
return state.replace(
ets=ets,
cur_model_output=(23 * ets[2] - 16 * ets[1] + 5 * ets[0]) / 12,
cur_sample=sample,
)
def counter_other(state: PNDMSchedulerState):
ets = state.ets.at[3].set(model_output)
next_model_output = (1 / 24) * (55 * ets[3] - 59 * ets[2] + 37 * ets[1] - 9 * ets[0])
ets = ets.at[0].set(ets[1])
ets = ets.at[1].set(ets[2])
ets = ets.at[2].set(ets[3])
return state.replace(
ets=ets,
cur_model_output=next_model_output,
cur_sample=sample,
)
counter = jnp.clip(state.counter, 0, 4)
state = jax.lax.switch(
counter,
[counter_0, counter_1, counter_2, counter_3, counter_other],
state,
)
sample = state.cur_sample
model_output = state.cur_model_output
prev_sample = self._get_prev_sample(sample, timestep, prev_timestep, model_output)
state = state.replace(counter=state.counter + 1)
return (prev_sample, state)
def _get_prev_sample(self, sample, timestep, prev_timestep, model_output):
# See formula (9) of PNDM paper https://arxiv.org/pdf/2202.09778.pdf
# this function computes x_(t−δ) using the formula of (9)
# Note that x_t needs to be added to both sides of the equation
# Notation (<variable name> -> <name in paper>
# alpha_prod_t -> α_t
# alpha_prod_t_prev -> α_(t−δ)
# beta_prod_t -> (1 - α_t)
# beta_prod_t_prev -> (1 - α_(t−δ))
# sample -> x_t
# model_output -> e_θ(x_t, t)
# prev_sample -> x_(t−δ)
alpha_prod_t = self.alphas_cumprod[timestep]
alpha_prod_t_prev = jnp.where(prev_timestep >= 0, self.alphas_cumprod[prev_timestep], self.final_alpha_cumprod)
beta_prod_t = 1 - alpha_prod_t
beta_prod_t_prev = 1 - alpha_prod_t_prev
# corresponds to (α_(t−δ) - α_t) divided by
# denominator of x_t in formula (9) and plus 1
# Note: (α_(t−δ) - α_t) / (sqrt(α_t) * (sqrt(α_(t−δ)) + sqrt(α_t))) + 1 =
# sqrt(α_(t−δ)) / sqrt(α_t)
sample_coeff = (alpha_prod_t_prev / alpha_prod_t) ** (0.5)
# corresponds to denominator of e_θ(x_t, t) in formula (9)
model_output_denom_coeff = alpha_prod_t * beta_prod_t_prev ** (0.5) + (
alpha_prod_t * beta_prod_t * alpha_prod_t_prev
) ** (0.5)
# full formula (9)
prev_sample = (
sample_coeff * sample - (alpha_prod_t_prev - alpha_prod_t) * model_output / model_output_denom_coeff
)
return prev_sample
def add_noise(
self,
original_samples: jnp.ndarray,
noise: jnp.ndarray,
timesteps: jnp.ndarray,
) -> jnp.ndarray:
sqrt_alpha_prod = self.alphas_cumprod[timesteps] ** 0.5
sqrt_alpha_prod = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
sqrt_alpha_prod = sqrt_alpha_prod[..., None]
sqrt_one_minus_alpha_prod = (1 - self.alphas_cumprod[timesteps]) ** 0.5
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod[..., None]
noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def __len__(self):
return self.config.num_train_timesteps
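# --- Usage sketch (added for illustration, not part of the original file) ---
# PLMS-only sampling (skip_prk_steps=True), as used for Stable Diffusion;
# `unet` stands in for a hypothetical epsilon-predicting model.
#
#   scheduler = FlaxPNDMScheduler(skip_prk_steps=True)
#   state = scheduler.create_state()
#   state = scheduler.set_timesteps(state, num_inference_steps=50, shape=(1, 4, 64, 64))
#   sample = jnp.zeros((1, 4, 64, 64))
#   for t in state.timesteps:
#       noise_pred = unet(sample, t)                # hypothetical model call
#       sample, state = scheduler.step(state, noise_pred, t, sample, return_dict=False)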
|
diffusers-main
|
src/diffusers/schedulers/scheduling_pndm_flax.py
|
# Copyright 2022 Google Brain and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
"""
The variance preserving stochastic differential equation (SDE) scheduler.
[`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
[`~ConfigMixin`] also provides general loading and saving functionality via the [`~ConfigMixin.save_config`] and
[`~ConfigMixin.from_config`] functions.
For more information, see the original paper: https://arxiv.org/abs/2011.13456
UNDER CONSTRUCTION
"""
@register_to_config
def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
self.sigmas = None
self.discrete_sigmas = None
self.timesteps = None
def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)
def step_pred(self, score, x, t, generator=None):
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
)
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
log_mean_coeff = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
std = std.flatten()
while len(std.shape) < len(score.shape):
std = std.unsqueeze(-1)
score = -score / std
# compute
dt = -1.0 / len(self.timesteps)
beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
beta_t = beta_t.flatten()
while len(beta_t.shape) < len(x.shape):
beta_t = beta_t.unsqueeze(-1)
drift = -0.5 * beta_t * x
diffusion = torch.sqrt(beta_t)
drift = drift - diffusion**2 * score
x_mean = x + drift * dt
# add noise
noise = torch.randn(x.shape, layout=x.layout, generator=generator).to(x.device)
x = x_mean + diffusion * math.sqrt(-dt) * noise
return x, x_mean
def __len__(self):
return self.config.num_train_timesteps
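# --- Usage sketch (added for illustration, not part of the original file) ---
# Reverse-SDE sampling with a hypothetical score network `score_model`
# (not defined in this file).
#
#   scheduler = ScoreSdeVpScheduler()
#   scheduler.set_timesteps(1000)
#   x = torch.randn(1, 3, 32, 32)
#   for t in scheduler.timesteps:
#       score = score_model(x, t)                   # hypothetical model call
#       x, x_mean = scheduler.step_pred(score, x, t)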
|
diffusers-main
|
src/diffusers/schedulers/scheduling_sde_vp.py
|
# Copyright 2022 UC Berkeley Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import FlaxSchedulerMixin, FlaxSchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999) -> jnp.ndarray:
"""
Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
(1-beta) over time from t = [0,1].
Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
to that part of the diffusion process.
Args:
num_diffusion_timesteps (`int`): the number of betas to produce.
max_beta (`float`): the maximum beta to use; use values lower than 1 to
prevent singularities.
Returns:
betas (`jnp.ndarray`): the betas used by the scheduler to step the model outputs
"""
def alpha_bar(time_step):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2
betas = []
for i in range(num_diffusion_timesteps):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
return jnp.array(betas, dtype=jnp.float32)
@flax.struct.dataclass
class DDPMSchedulerState:
# setable values
timesteps: jnp.ndarray
num_inference_steps: Optional[int] = None
@classmethod
def create(cls, num_train_timesteps: int):
return cls(timesteps=jnp.arange(0, num_train_timesteps)[::-1])
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
state: DDPMSchedulerState
class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
"""
Denoising diffusion probabilistic models (DDPMs) explores the connections between denoising score matching and
Langevin dynamics sampling.
[`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
[`~ConfigMixin`] also provides general loading and saving functionality via the [`~ConfigMixin.save_config`] and
[`~ConfigMixin.from_config`] functions.
For more details, see the original paper: https://arxiv.org/abs/2006.11239
Args:
num_train_timesteps (`int`): number of diffusion steps used to train the model.
beta_start (`float`): the starting `beta` value of inference.
beta_end (`float`): the final `beta` value.
beta_schedule (`str`):
the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
`linear`, `scaled_linear`, or `squaredcos_cap_v2`.
trained_betas (`np.ndarray`, optional):
option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.
variance_type (`str`):
options to clip the variance used when adding noise to the denoised sample. Choose from `fixed_small`,
`fixed_small_log`, `fixed_large`, `fixed_large_log`, `learned` or `learned_range`.
clip_sample (`bool`, default `True`):
option to clip predicted sample between -1 and 1 for numerical stability.
"""
@register_to_config
def __init__(
self,
num_train_timesteps: int = 1000,
beta_start: float = 0.0001,
beta_end: float = 0.02,
beta_schedule: str = "linear",
trained_betas: Optional[jnp.ndarray] = None,
variance_type: str = "fixed_small",
clip_sample: bool = True,
):
if trained_betas is not None:
self.betas = jnp.asarray(trained_betas)
elif beta_schedule == "linear":
self.betas = jnp.linspace(beta_start, beta_end, num_train_timesteps, dtype=jnp.float32)
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
self.betas = jnp.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=jnp.float32) ** 2
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
self.betas = betas_for_alpha_bar(num_train_timesteps)
else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
self.alphas = 1.0 - self.betas
self.alphas_cumprod = jnp.cumprod(self.alphas, axis=0)
self.one = jnp.array(1.0)
self.state = DDPMSchedulerState.create(num_train_timesteps=num_train_timesteps)
self.variance_type = variance_type
def set_timesteps(self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple) -> DDPMSchedulerState:
"""
Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference.
Args:
            state (`DDPMSchedulerState`):
the `FlaxDDPMScheduler` state data class instance.
num_inference_steps (`int`):
the number of diffusion steps used when generating samples with a pre-trained model.
"""
num_inference_steps = min(self.config.num_train_timesteps, num_inference_steps)
timesteps = jnp.arange(
0, self.config.num_train_timesteps, self.config.num_train_timesteps // num_inference_steps
)[::-1]
return state.replace(num_inference_steps=num_inference_steps, timesteps=timesteps)
def _get_variance(self, t, predicted_variance=None, variance_type=None):
alpha_prod_t = self.alphas_cumprod[t]
alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * self.betas[t]
if variance_type is None:
variance_type = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
variance = jnp.clip(variance, a_min=1e-20)
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
variance = jnp.log(jnp.clip(variance, a_min=1e-20))
elif variance_type == "fixed_large":
variance = self.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
variance = jnp.log(self.betas[t])
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
min_log = variance
max_log = self.betas[t]
frac = (predicted_variance + 1) / 2
variance = frac * max_log + (1 - frac) * min_log
return variance
def step(
self,
state: DDPMSchedulerState,
model_output: jnp.ndarray,
timestep: int,
sample: jnp.ndarray,
key: random.KeyArray,
predict_epsilon: bool = True,
return_dict: bool = True,
) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
"""
Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
process from the learned model outputs (most often the predicted noise).
Args:
state (`DDPMSchedulerState`): the `FlaxDDPMScheduler` state data class instance.
model_output (`jnp.ndarray`): direct output from learned diffusion model.
timestep (`int`): current discrete timestep in the diffusion chain.
sample (`jnp.ndarray`):
current instance of sample being created by diffusion process.
key (`random.KeyArray`): a PRNG key.
            predict_epsilon (`bool`):
                optional flag; when `True` (default) the model output is treated as the predicted noise
                (epsilon), when `False` it is treated as the predicted denoised sample itself.
return_dict (`bool`): option for returning tuple rather than FlaxDDPMSchedulerOutput class
Returns:
[`FlaxDDPMSchedulerOutput`] or `tuple`: [`FlaxDDPMSchedulerOutput`] if `return_dict` is True, otherwise a
`tuple`. When returning a tuple, the first element is the sample tensor.
"""
t = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
            # `jnp.split` takes a section count (unlike `torch.split`), so split the channel axis in two
            model_output, predicted_variance = jnp.split(model_output, 2, axis=1)
else:
predicted_variance = None
# 1. compute alphas, betas
alpha_prod_t = self.alphas_cumprod[t]
alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
beta_prod_t = 1 - alpha_prod_t
beta_prod_t_prev = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if predict_epsilon:
pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
else:
pred_original_sample = model_output
# 3. Clip "predicted x_0"
if self.config.clip_sample:
pred_original_sample = jnp.clip(pred_original_sample, -1, 1)
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * self.betas[t]) / beta_prod_t
current_sample_coeff = self.alphas[t] ** (0.5) * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
variance = 0
if t > 0:
            key = random.split(key, num=1)[0]  # `random.split` returns a batch of keys; take the single subkey
noise = random.normal(key=key, shape=model_output.shape)
variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise
pred_prev_sample = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)
def add_noise(
self,
original_samples: jnp.ndarray,
noise: jnp.ndarray,
timesteps: jnp.ndarray,
) -> jnp.ndarray:
sqrt_alpha_prod = self.alphas_cumprod[timesteps] ** 0.5
sqrt_alpha_prod = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
sqrt_alpha_prod = sqrt_alpha_prod[..., None]
sqrt_one_minus_alpha_prod = (1 - self.alphas_cumprod[timesteps]) ** 0.5
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod[..., None]
noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def __len__(self):
return self.config.num_train_timesteps
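if __name__ == "__main__":
    # Minimal denoising-loop sketch for FlaxDDPMScheduler, assuming only this
    # module's own imports (jnp, jax.random as random). `model_fn` is a
    # zero-returning stand-in for a trained noise-prediction network.
    scheduler = FlaxDDPMScheduler()
    state = scheduler.set_timesteps(scheduler.state, num_inference_steps=10, shape=(1, 3, 8, 8))
    key = random.PRNGKey(0)
    key, subkey = random.split(key)
    sample = random.normal(subkey, shape=(1, 3, 8, 8))
    model_fn = lambda x, t: jnp.zeros_like(x)
    for t in state.timesteps:
        key, subkey = random.split(key)
        output = scheduler.step(state, model_fn(sample, t), int(t), sample, key=subkey)
        sample, state = output.prev_sample, output.state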
|
diffusers-main
|
src/diffusers/schedulers/scheduling_ddpm_flax.py
|
# Copyright 2022 NVIDIA and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
"""
Output class for the scheduler's step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
derivative (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Derivative of predicted original image sample (x_0).
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
"""
prev_sample: torch.FloatTensor
derivative: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
"""
    Stochastic sampling from Karras et al. [1] tailored to the Variance Exploding (VE) models [2]. Use Algorithm 2 and
the VE column of Table 1 from [1] for reference.
[1] Karras, Tero, et al. "Elucidating the Design Space of Diffusion-Based Generative Models."
https://arxiv.org/abs/2206.00364 [2] Song, Yang, et al. "Score-based generative modeling through stochastic
differential equations." https://arxiv.org/abs/2011.13456
[`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
[`~ConfigMixin`] also provides general loading and saving functionality via the [`~ConfigMixin.save_config`] and
[`~ConfigMixin.from_config`] functions.
For more details on the parameters, see the original paper's Appendix E.: "Elucidating the Design Space of
Diffusion-Based Generative Models." https://arxiv.org/abs/2206.00364. The grid search values used to find the
optimal {s_noise, s_churn, s_min, s_max} for a specific model are described in Table 5 of the paper.
Args:
sigma_min (`float`): minimum noise magnitude
sigma_max (`float`): maximum noise magnitude
s_noise (`float`): the amount of additional noise to counteract loss of detail during sampling.
A reasonable range is [1.000, 1.011].
s_churn (`float`): the parameter controlling the overall amount of stochasticity.
A reasonable range is [0, 100].
s_min (`float`): the start value of the sigma range where we add noise (enable stochasticity).
A reasonable range is [0, 10].
s_max (`float`): the end value of the sigma range where we add noise.
A reasonable range is [0.2, 80].
"""
@register_to_config
def __init__(
self,
sigma_min: float = 0.02,
sigma_max: float = 100,
s_noise: float = 1.007,
s_churn: float = 80,
s_min: float = 0.05,
s_max: float = 50,
):
# standard deviation of the initial noise distribution
self.init_noise_sigma = sigma_max
# setable values
        self.num_inference_steps: Optional[int] = None
        self.timesteps: Optional[torch.Tensor] = None
        self.schedule: Optional[torch.FloatTensor] = None  # sigma(t_i)
def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
"""
Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
current timestep.
Args:
sample (`torch.FloatTensor`): input sample
timestep (`int`, optional): current timestep
Returns:
`torch.FloatTensor`: scaled input sample
"""
return sample
def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
"""
Sets the continuous timesteps used for the diffusion chain. Supporting function to be run before inference.
Args:
num_inference_steps (`int`):
the number of diffusion steps used when generating samples with a pre-trained model.
"""
self.num_inference_steps = num_inference_steps
timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
self.timesteps = torch.from_numpy(timesteps).to(device)
schedule = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)
def add_noise_to_input(
self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
) -> Tuple[torch.FloatTensor, float]:
"""
Explicit Langevin-like "churn" step of adding noise to the sample according to a factor gamma_i ≥ 0 to reach a
higher noise level sigma_hat = sigma_i + gamma_i*sigma_i.
TODO Args:
"""
if self.config.s_min <= sigma <= self.config.s_max:
gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
else:
gamma = 0
# sample eps ~ N(0, S_noise^2 * I)
eps = self.config.s_noise * torch.randn(sample.shape, generator=generator).to(sample.device)
sigma_hat = sigma + gamma * sigma
sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def step(
self,
model_output: torch.FloatTensor,
sigma_hat: float,
sigma_prev: float,
sample_hat: torch.FloatTensor,
return_dict: bool = True,
) -> Union[KarrasVeOutput, Tuple]:
"""
Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
process from the learned model outputs (most often the predicted noise).
Args:
model_output (`torch.FloatTensor`): direct output from learned diffusion model.
sigma_hat (`float`): TODO
sigma_prev (`float`): TODO
sample_hat (`torch.FloatTensor`): TODO
return_dict (`bool`): option for returning tuple rather than KarrasVeOutput class
Returns:
[`~schedulers.scheduling_karras_ve.KarrasVeOutput`] or `tuple`:
[`~schedulers.scheduling_karras_ve.KarrasVeOutput`] if `return_dict` is True, otherwise a `tuple`. When
returning a tuple, the first element is the sample tensor.
"""
pred_original_sample = sample_hat + sigma_hat * model_output
derivative = (sample_hat - pred_original_sample) / sigma_hat
sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
)
def step_correct(
self,
model_output: torch.FloatTensor,
sigma_hat: float,
sigma_prev: float,
sample_hat: torch.FloatTensor,
sample_prev: torch.FloatTensor,
derivative: torch.FloatTensor,
return_dict: bool = True,
) -> Union[KarrasVeOutput, Tuple]:
"""
Correct the predicted sample based on the output model_output of the network. TODO complete description
Args:
model_output (`torch.FloatTensor`): direct output from learned diffusion model.
sigma_hat (`float`): TODO
sigma_prev (`float`): TODO
sample_hat (`torch.FloatTensor`): TODO
sample_prev (`torch.FloatTensor`): TODO
derivative (`torch.FloatTensor`): TODO
return_dict (`bool`): option for returning tuple rather than KarrasVeOutput class
Returns:
prev_sample (TODO): updated sample in the diffusion chain. derivative (TODO): TODO
"""
pred_original_sample = sample_prev + sigma_prev * model_output
derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
)
def add_noise(self, original_samples, noise, timesteps):
raise NotImplementedError()
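if __name__ == "__main__":
    # Minimal stochastic-sampling sketch for KarrasVeScheduler, mirroring the
    # churn / predict / Euler-step structure of Algorithm 2. `model` is a
    # zero-returning stand-in for a trained denoiser, not a library component;
    # a second-order correction via `step_correct` could follow each step.
    scheduler = KarrasVeScheduler()
    scheduler.set_timesteps(num_inference_steps=10)
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    model = lambda x, sigma: torch.zeros_like(x)
    for t in scheduler.timesteps:
        sigma = scheduler.schedule[t]
        sigma_prev = scheduler.schedule[t - 1] if t > 0 else torch.tensor(0.0)
        sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
        step_output = scheduler.step(model(sample_hat, sigma_hat), sigma_hat, sigma_prev, sample_hat)
        sample = step_output.prev_sample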
|
diffusers-main
|
src/diffusers/schedulers/scheduling_karras_ve.py
|
# Copyright 2022 Google Brain and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import FlaxSchedulerMixin, FlaxSchedulerOutput
@flax.struct.dataclass
class ScoreSdeVeSchedulerState:
# setable values
timesteps: Optional[jnp.ndarray] = None
discrete_sigmas: Optional[jnp.ndarray] = None
sigmas: Optional[jnp.ndarray] = None
@classmethod
def create(cls):
return cls()
@dataclass
class FlaxSdeVeOutput(FlaxSchedulerOutput):
"""
Output class for the ScoreSdeVeScheduler's step function output.
Args:
state (`ScoreSdeVeSchedulerState`):
prev_sample (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
prev_sample_mean (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)` for images):
Mean averaged `prev_sample`. Same as `prev_sample`, only mean-averaged over previous timesteps.
"""
state: ScoreSdeVeSchedulerState
prev_sample: jnp.ndarray
prev_sample_mean: Optional[jnp.ndarray] = None
class FlaxScoreSdeVeScheduler(FlaxSchedulerMixin, ConfigMixin):
"""
The variance exploding stochastic differential equation (SDE) scheduler.
For more information, see the original paper: https://arxiv.org/abs/2011.13456
[`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
[`~ConfigMixin`] also provides general loading and saving functionality via the [`~ConfigMixin.save_config`] and
[`~ConfigMixin.from_config`] functions.
Args:
num_train_timesteps (`int`): number of diffusion steps used to train the model.
snr (`float`):
coefficient weighting the step from the model_output sample (from the network) to the random noise.
sigma_min (`float`):
initial noise scale for sigma sequence in sampling procedure. The minimum sigma should mirror the
distribution of the data.
sigma_max (`float`): maximum value used for the range of continuous timesteps passed into the model.
sampling_eps (`float`): the end value of sampling, where timesteps decrease progressively from 1 to
epsilon.
correct_steps (`int`): number of correction steps performed on a produced sample.
"""
@register_to_config
def __init__(
self,
num_train_timesteps: int = 2000,
snr: float = 0.15,
sigma_min: float = 0.01,
sigma_max: float = 1348.0,
sampling_eps: float = 1e-5,
correct_steps: int = 1,
):
state = ScoreSdeVeSchedulerState.create()
self.state = self.set_sigmas(state, num_train_timesteps, sigma_min, sigma_max, sampling_eps)
def set_timesteps(
self, state: ScoreSdeVeSchedulerState, num_inference_steps: int, shape: Tuple, sampling_eps: float = None
) -> ScoreSdeVeSchedulerState:
"""
Sets the continuous timesteps used for the diffusion chain. Supporting function to be run before inference.
Args:
state (`ScoreSdeVeSchedulerState`): the `FlaxScoreSdeVeScheduler` state data class instance.
num_inference_steps (`int`):
the number of diffusion steps used when generating samples with a pre-trained model.
sampling_eps (`float`, optional): final timestep value (overrides value given at Scheduler instantiation).
"""
sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
timesteps = jnp.linspace(1, sampling_eps, num_inference_steps)
return state.replace(timesteps=timesteps)
def set_sigmas(
self,
state: ScoreSdeVeSchedulerState,
num_inference_steps: int,
sigma_min: float = None,
sigma_max: float = None,
sampling_eps: float = None,
) -> ScoreSdeVeSchedulerState:
"""
Sets the noise scales used for the diffusion chain. Supporting function to be run before inference.
The sigmas control the weight of the `drift` and `diffusion` components of sample update.
Args:
state (`ScoreSdeVeSchedulerState`): the `FlaxScoreSdeVeScheduler` state data class instance.
num_inference_steps (`int`):
the number of diffusion steps used when generating samples with a pre-trained model.
sigma_min (`float`, optional):
initial noise scale value (overrides value given at Scheduler instantiation).
sigma_max (`float`, optional): final noise scale value (overrides value given at Scheduler instantiation).
sampling_eps (`float`, optional): final timestep value (overrides value given at Scheduler instantiation).
"""
sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if state.timesteps is None:
            # pass `sampling_eps` by keyword so it is not consumed by the unused `shape` parameter
            state = self.set_timesteps(state, num_inference_steps, shape=(), sampling_eps=sampling_eps)
discrete_sigmas = jnp.exp(jnp.linspace(jnp.log(sigma_min), jnp.log(sigma_max), num_inference_steps))
sigmas = jnp.array([sigma_min * (sigma_max / sigma_min) ** t for t in state.timesteps])
return state.replace(discrete_sigmas=discrete_sigmas, sigmas=sigmas)
def get_adjacent_sigma(self, state, timesteps, t):
return jnp.where(timesteps == 0, jnp.zeros_like(t), state.discrete_sigmas[timesteps - 1])
def step_pred(
self,
state: ScoreSdeVeSchedulerState,
model_output: jnp.ndarray,
timestep: int,
sample: jnp.ndarray,
key: random.KeyArray,
return_dict: bool = True,
) -> Union[FlaxSdeVeOutput, Tuple]:
"""
Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
process from the learned model outputs (most often the predicted noise).
Args:
state (`ScoreSdeVeSchedulerState`): the `FlaxScoreSdeVeScheduler` state data class instance.
model_output (`jnp.ndarray`): direct output from learned diffusion model.
timestep (`int`): current discrete timestep in the diffusion chain.
sample (`jnp.ndarray`):
current instance of sample being created by diffusion process.
            key (`random.KeyArray`): a PRNG key.
return_dict (`bool`): option for returning tuple rather than FlaxSdeVeOutput class
Returns:
[`FlaxSdeVeOutput`] or `tuple`: [`FlaxSdeVeOutput`] if `return_dict` is True, otherwise a `tuple`. When
returning a tuple, the first element is the sample tensor.
"""
if state.timesteps is None:
raise ValueError(
"`state.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
)
timestep = timestep * jnp.ones(
sample.shape[0],
)
        # jnp arrays have no `.long()`; cast explicitly to an integer dtype for indexing
        timesteps = (timestep * (len(state.timesteps) - 1)).astype(jnp.int32)
sigma = state.discrete_sigmas[timesteps]
adjacent_sigma = self.get_adjacent_sigma(state, timesteps, timestep)
drift = jnp.zeros_like(sample)
diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
diffusion = diffusion.flatten()
while len(diffusion.shape) < len(sample.shape):
diffusion = diffusion[:, None]
drift = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
        key = random.split(key, num=1)[0]  # take the single subkey from the returned key batch
noise = random.normal(key=key, shape=sample.shape)
prev_sample_mean = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
prev_sample = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean, state)
return FlaxSdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean, state=state)
def step_correct(
self,
state: ScoreSdeVeSchedulerState,
model_output: jnp.ndarray,
sample: jnp.ndarray,
key: random.KeyArray,
return_dict: bool = True,
) -> Union[FlaxSdeVeOutput, Tuple]:
"""
Correct the predicted sample based on the output model_output of the network. This is often run repeatedly
after making the prediction for the previous timestep.
Args:
state (`ScoreSdeVeSchedulerState`): the `FlaxScoreSdeVeScheduler` state data class instance.
model_output (`jnp.ndarray`): direct output from learned diffusion model.
sample (`jnp.ndarray`):
current instance of sample being created by diffusion process.
            key (`random.KeyArray`): a PRNG key.
return_dict (`bool`): option for returning tuple rather than FlaxSdeVeOutput class
Returns:
[`FlaxSdeVeOutput`] or `tuple`: [`FlaxSdeVeOutput`] if `return_dict` is True, otherwise a `tuple`. When
returning a tuple, the first element is the sample tensor.
"""
if state.timesteps is None:
raise ValueError(
"`state.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
)
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
        key = random.split(key, num=1)[0]  # take the single subkey from the returned key batch
noise = random.normal(key=key, shape=sample.shape)
# compute step size from the model_output, the noise, and the snr
grad_norm = jnp.linalg.norm(model_output)
noise_norm = jnp.linalg.norm(noise)
step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
step_size = step_size * jnp.ones(sample.shape[0])
# compute corrected sample: model_output term and noise term
step_size = step_size.flatten()
while len(step_size.shape) < len(sample.shape):
step_size = step_size[:, None]
prev_sample_mean = sample + step_size * model_output
prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample, state)
return FlaxSdeVeOutput(prev_sample=prev_sample, state=state)
def __len__(self):
return self.config.num_train_timesteps
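if __name__ == "__main__":
    # Minimal predictor-corrector sketch for FlaxScoreSdeVeScheduler, assuming
    # only this module's own imports. `score_fn` is a small-constant stand-in
    # for a trained score network (non-zero so the corrector's norm ratio stays
    # finite); the loop shows the intended step_correct / step_pred ordering.
    scheduler = FlaxScoreSdeVeScheduler()
    shape = (1, 3, 8, 8)
    state = scheduler.set_timesteps(scheduler.state, num_inference_steps=10, shape=shape)
    state = scheduler.set_sigmas(state, num_inference_steps=10)
    key = random.PRNGKey(0)
    key, subkey = random.split(key)
    sample = random.normal(subkey, shape=shape) * scheduler.config.sigma_max
    score_fn = lambda x, t: jnp.full(x.shape, 1e-3)
    for t in state.timesteps:
        for _ in range(scheduler.config.correct_steps):
            key, subkey = random.split(key)
            output = scheduler.step_correct(state, score_fn(sample, t), sample, key=subkey)
            sample, state = output.prev_sample, output.state
        key, subkey = random.split(key)
        output = scheduler.step_pred(state, score_fn(sample, t), t, sample, key=subkey)
        sample, state = output.prev_sample, output.state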
|
diffusers-main
|
src/diffusers/schedulers/scheduling_sde_ve_flax.py
|
# Copyright 2022 Katherine Crowson and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from scipy import integrate
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import FlaxSchedulerMixin, FlaxSchedulerOutput
@flax.struct.dataclass
class LMSDiscreteSchedulerState:
# setable values
num_inference_steps: Optional[int] = None
timesteps: Optional[jnp.ndarray] = None
sigmas: Optional[jnp.ndarray] = None
    # kept as a tuple (a valid JAX pytree) because jnp arrays are immutable and lack append/pop
    derivatives: Tuple[jnp.ndarray, ...] = ()
@classmethod
def create(cls, num_train_timesteps: int, sigmas: jnp.ndarray):
return cls(timesteps=jnp.arange(0, num_train_timesteps)[::-1], sigmas=sigmas)
@dataclass
class FlaxLMSSchedulerOutput(FlaxSchedulerOutput):
state: LMSDiscreteSchedulerState
class FlaxLMSDiscreteScheduler(FlaxSchedulerMixin, ConfigMixin):
"""
Linear Multistep Scheduler for discrete beta schedules. Based on the original k-diffusion implementation by
Katherine Crowson:
https://github.com/crowsonkb/k-diffusion/blob/481677d114f6ea445aa009cf5bd7a9cdee909e47/k_diffusion/sampling.py#L181
[`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
[`~ConfigMixin`] also provides general loading and saving functionality via the [`~ConfigMixin.save_config`] and
[`~ConfigMixin.from_config`] functions.
Args:
num_train_timesteps (`int`): number of diffusion steps used to train the model.
beta_start (`float`): the starting `beta` value of inference.
beta_end (`float`): the final `beta` value.
beta_schedule (`str`):
the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
`linear` or `scaled_linear`.
trained_betas (`jnp.ndarray`, optional):
option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.
"""
@register_to_config
def __init__(
self,
num_train_timesteps: int = 1000,
beta_start: float = 0.0001,
beta_end: float = 0.02,
beta_schedule: str = "linear",
trained_betas: Optional[jnp.ndarray] = None,
):
if trained_betas is not None:
self.betas = jnp.asarray(trained_betas)
elif beta_schedule == "linear":
self.betas = jnp.linspace(beta_start, beta_end, num_train_timesteps, dtype=jnp.float32)
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
self.betas = jnp.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=jnp.float32) ** 2
else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
self.alphas = 1.0 - self.betas
self.alphas_cumprod = jnp.cumprod(self.alphas, axis=0)
self.state = LMSDiscreteSchedulerState.create(
num_train_timesteps=num_train_timesteps, sigmas=((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5
)
def get_lms_coefficient(self, state, order, t, current_order):
"""
Compute a linear multistep coefficient.
Args:
order (TODO):
t (TODO):
current_order (TODO):
"""
def lms_derivative(tau):
prod = 1.0
for k in range(order):
if current_order == k:
continue
prod *= (tau - state.sigmas[t - k]) / (state.sigmas[t - current_order] - state.sigmas[t - k])
return prod
integrated_coeff = integrate.quad(lms_derivative, state.sigmas[t], state.sigmas[t + 1], epsrel=1e-4)[0]
return integrated_coeff
def set_timesteps(
self, state: LMSDiscreteSchedulerState, num_inference_steps: int, shape: Tuple
) -> LMSDiscreteSchedulerState:
"""
Sets the timesteps used for the diffusion chain. Supporting function to be run before inference.
Args:
state (`LMSDiscreteSchedulerState`):
the `FlaxLMSDiscreteScheduler` state data class instance.
num_inference_steps (`int`):
the number of diffusion steps used when generating samples with a pre-trained model.
"""
timesteps = jnp.linspace(self.config.num_train_timesteps - 1, 0, num_inference_steps, dtype=jnp.float32)
low_idx = jnp.floor(timesteps).astype(int)
high_idx = jnp.ceil(timesteps).astype(int)
frac = jnp.mod(timesteps, 1.0)
sigmas = jnp.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
sigmas = (1 - frac) * sigmas[low_idx] + frac * sigmas[high_idx]
sigmas = jnp.concatenate([sigmas, jnp.array([0.0])]).astype(jnp.float32)
return state.replace(
num_inference_steps=num_inference_steps,
timesteps=timesteps.astype(int),
            derivatives=(),
sigmas=sigmas,
)
def step(
self,
state: LMSDiscreteSchedulerState,
model_output: jnp.ndarray,
timestep: int,
sample: jnp.ndarray,
order: int = 4,
return_dict: bool = True,
) -> Union[FlaxLMSSchedulerOutput, Tuple]:
"""
Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
process from the learned model outputs (most often the predicted noise).
Args:
state (`LMSDiscreteSchedulerState`): the `FlaxLMSDiscreteScheduler` state data class instance.
model_output (`jnp.ndarray`): direct output from learned diffusion model.
timestep (`int`): current discrete timestep in the diffusion chain.
sample (`jnp.ndarray`):
current instance of sample being created by diffusion process.
order: coefficient for multi-step inference.
return_dict (`bool`): option for returning tuple rather than FlaxLMSSchedulerOutput class
Returns:
[`FlaxLMSSchedulerOutput`] or `tuple`: [`FlaxLMSSchedulerOutput`] if `return_dict` is True, otherwise a
`tuple`. When returning a tuple, the first element is the sample tensor.
"""
sigma = state.sigmas[timestep]
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
pred_original_sample = sample - sigma * model_output
# 2. Convert to an ODE derivative
derivative = (sample - pred_original_sample) / sigma
        # jnp arrays are immutable and have no `append`/`pop`; keep the derivative history in a tuple
        derivatives = state.derivatives + (derivative,)
        if len(derivatives) > order:
            derivatives = derivatives[1:]
        state = state.replace(derivatives=derivatives)
# 3. Compute linear multistep coefficients
order = min(timestep + 1, order)
lms_coeffs = [self.get_lms_coefficient(state, order, timestep, curr_order) for curr_order in range(order)]
# 4. Compute previous sample based on the derivatives path
prev_sample = sample + sum(
coeff * derivative for coeff, derivative in zip(lms_coeffs, reversed(state.derivatives))
)
if not return_dict:
return (prev_sample, state)
return FlaxLMSSchedulerOutput(prev_sample=prev_sample, state=state)
def add_noise(
self,
state: LMSDiscreteSchedulerState,
original_samples: jnp.ndarray,
noise: jnp.ndarray,
timesteps: jnp.ndarray,
) -> jnp.ndarray:
sigma = state.sigmas[timesteps].flatten()
while len(sigma.shape) < len(noise.shape):
sigma = sigma[..., None]
noisy_samples = original_samples + noise * sigma
return noisy_samples
def __len__(self):
return self.config.num_train_timesteps
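if __name__ == "__main__":
    # Minimal K-LMS loop sketch for FlaxLMSDiscreteScheduler. `step` expects the
    # *position* of the current timestep within `state.timesteps` (it indexes
    # `state.sigmas` directly), so we iterate by index; `model_fn` is a
    # zero-returning stand-in for a trained noise-prediction network.
    scheduler = FlaxLMSDiscreteScheduler()
    state = scheduler.set_timesteps(scheduler.state, num_inference_steps=10, shape=(1, 3, 8, 8))
    sample = jnp.zeros((1, 3, 8, 8))
    model_fn = lambda x, t: jnp.zeros_like(x)
    for i, t in enumerate(state.timesteps):
        output = scheduler.step(state, model_fn(sample, t), i, sample)
        sample, state = output.prev_sample, output.state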
|
diffusers-main
|
src/diffusers/schedulers/scheduling_lms_discrete_flax.py
|
# Copyright 2022 Katherine Crowson and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from scipy import integrate
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, deprecate
from .scheduling_utils import SchedulerMixin
@dataclass
class LMSDiscreteSchedulerOutput(BaseOutput):
"""
Output class for the scheduler's step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
class LMSDiscreteScheduler(SchedulerMixin, ConfigMixin):
"""
Linear Multistep Scheduler for discrete beta schedules. Based on the original k-diffusion implementation by
Katherine Crowson:
https://github.com/crowsonkb/k-diffusion/blob/481677d114f6ea445aa009cf5bd7a9cdee909e47/k_diffusion/sampling.py#L181
[`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
[`~ConfigMixin`] also provides general loading and saving functionality via the [`~ConfigMixin.save_config`] and
[`~ConfigMixin.from_config`] functions.
Args:
num_train_timesteps (`int`): number of diffusion steps used to train the model.
beta_start (`float`): the starting `beta` value of inference.
beta_end (`float`): the final `beta` value.
beta_schedule (`str`):
the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
`linear` or `scaled_linear`.
trained_betas (`np.ndarray`, optional):
option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.
"""
@register_to_config
def __init__(
self,
num_train_timesteps: int = 1000,
beta_start: float = 0.0001,
beta_end: float = 0.02,
beta_schedule: str = "linear",
trained_betas: Optional[np.ndarray] = None,
):
if trained_betas is not None:
self.betas = torch.from_numpy(trained_betas)
elif beta_schedule == "linear":
self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
self.betas = (
torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
)
else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
self.alphas = 1.0 - self.betas
self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
sigmas = np.concatenate([sigmas[::-1], [0.0]]).astype(np.float32)
self.sigmas = torch.from_numpy(sigmas)
# standard deviation of the initial noise distribution
self.init_noise_sigma = self.sigmas.max()
# setable values
self.num_inference_steps = None
timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=float)[::-1].copy()
self.timesteps = torch.from_numpy(timesteps)
self.derivatives = []
self.is_scale_input_called = False
def scale_model_input(
self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor]
) -> torch.FloatTensor:
"""
Scales the denoising model input by `(sigma**2 + 1) ** 0.5` to match the K-LMS algorithm.
Args:
sample (`torch.FloatTensor`): input sample
timestep (`float` or `torch.FloatTensor`): the current timestep in the diffusion chain
Returns:
`torch.FloatTensor`: scaled input sample
"""
if isinstance(timestep, torch.Tensor):
timestep = timestep.to(self.timesteps.device)
step_index = (self.timesteps == timestep).nonzero().item()
sigma = self.sigmas[step_index]
sample = sample / ((sigma**2 + 1) ** 0.5)
self.is_scale_input_called = True
return sample
def get_lms_coefficient(self, order, t, current_order):
"""
Compute a linear multistep coefficient.
Args:
order (TODO):
t (TODO):
current_order (TODO):
"""
def lms_derivative(tau):
prod = 1.0
for k in range(order):
if current_order == k:
continue
prod *= (tau - self.sigmas[t - k]) / (self.sigmas[t - current_order] - self.sigmas[t - k])
return prod
integrated_coeff = integrate.quad(lms_derivative, self.sigmas[t], self.sigmas[t + 1], epsrel=1e-4)[0]
return integrated_coeff
def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
"""
Sets the timesteps used for the diffusion chain. Supporting function to be run before inference.
Args:
num_inference_steps (`int`):
the number of diffusion steps used when generating samples with a pre-trained model.
device (`str` or `torch.device`, optional):
the device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
"""
self.num_inference_steps = num_inference_steps
timesteps = np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
self.sigmas = torch.from_numpy(sigmas).to(device=device)
self.timesteps = torch.from_numpy(timesteps).to(device=device)
self.derivatives = []
def step(
self,
model_output: torch.FloatTensor,
timestep: Union[float, torch.FloatTensor],
sample: torch.FloatTensor,
order: int = 4,
return_dict: bool = True,
) -> Union[LMSDiscreteSchedulerOutput, Tuple]:
"""
Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
process from the learned model outputs (most often the predicted noise).
Args:
model_output (`torch.FloatTensor`): direct output from learned diffusion model.
timestep (`float`): current timestep in the diffusion chain.
sample (`torch.FloatTensor`):
current instance of sample being created by diffusion process.
order: coefficient for multi-step inference.
return_dict (`bool`): option for returning tuple rather than LMSDiscreteSchedulerOutput class
Returns:
[`~schedulers.scheduling_utils.LMSDiscreteSchedulerOutput`] or `tuple`:
[`~schedulers.scheduling_utils.LMSDiscreteSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`.
When returning a tuple, the first element is the sample tensor.
"""
if not self.is_scale_input_called:
warnings.warn(
"The `scale_model_input` function should be called before `step` to ensure correct denoising. "
"See `StableDiffusionPipeline` for a usage example."
)
if isinstance(timestep, torch.Tensor):
timestep = timestep.to(self.timesteps.device)
if (
isinstance(timestep, int)
or isinstance(timestep, torch.IntTensor)
or isinstance(timestep, torch.LongTensor)
):
deprecate(
"timestep as an index",
"0.7.0",
"Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to"
" `LMSDiscreteScheduler.step()` will not be supported in future versions. Make sure to pass"
" one of the `scheduler.timesteps` as a timestep.",
standard_warn=False,
)
step_index = timestep
else:
step_index = (self.timesteps == timestep).nonzero().item()
sigma = self.sigmas[step_index]
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
pred_original_sample = sample - sigma * model_output
# 2. Convert to an ODE derivative
derivative = (sample - pred_original_sample) / sigma
self.derivatives.append(derivative)
if len(self.derivatives) > order:
self.derivatives.pop(0)
# 3. Compute linear multistep coefficients
order = min(step_index + 1, order)
lms_coeffs = [self.get_lms_coefficient(order, step_index, curr_order) for curr_order in range(order)]
# 4. Compute previous sample based on the derivatives path
prev_sample = sample + sum(
coeff * derivative for coeff, derivative in zip(lms_coeffs, reversed(self.derivatives))
)
if not return_dict:
return (prev_sample,)
return LMSDiscreteSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def add_noise(
self,
original_samples: torch.FloatTensor,
noise: torch.FloatTensor,
timesteps: torch.FloatTensor,
) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
self.sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
self.timesteps = self.timesteps.to(original_samples.device)
timesteps = timesteps.to(original_samples.device)
schedule_timesteps = self.timesteps
if isinstance(timesteps, torch.IntTensor) or isinstance(timesteps, torch.LongTensor):
deprecate(
"timesteps as indices",
"0.7.0",
"Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to"
" `LMSDiscreteScheduler.add_noise()` will not be supported in future versions. Make sure to"
" pass values from `scheduler.timesteps` as timesteps.",
standard_warn=False,
)
step_indices = timesteps
else:
step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps]
sigma = self.sigmas[step_indices].flatten()
while len(sigma.shape) < len(original_samples.shape):
sigma = sigma.unsqueeze(-1)
noisy_samples = original_samples + noise * sigma
return noisy_samples
def __len__(self):
return self.config.num_train_timesteps
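if __name__ == "__main__":
    # Minimal K-LMS denoising-loop sketch; `model_fn` is a zero-returning
    # stand-in for a trained noise-prediction network. `scale_model_input`
    # is called before `step`, as the warning in `step` expects.
    scheduler = LMSDiscreteScheduler()
    scheduler.set_timesteps(num_inference_steps=10)
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    model_fn = lambda x, t: torch.zeros_like(x)
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        output = scheduler.step(model_fn(model_input, t), t, sample)
        sample = output.prev_sample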
|
diffusers-main
|
src/diffusers/schedulers/scheduling_lms_discrete.py
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..utils import is_flax_available, is_scipy_available, is_torch_available
if is_torch_available():
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_utils import SchedulerMixin
else:
from ..utils.dummy_pt_objects import * # noqa F403
if is_flax_available():
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import FlaxSchedulerMixin
else:
from ..utils.dummy_flax_objects import * # noqa F403
if is_scipy_available() and is_torch_available():
from .scheduling_lms_discrete import LMSDiscreteScheduler
else:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
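# Sketch of the resulting import surface when both torch and scipy are present:
#
#     from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler
#
# When a backend is missing, the corresponding dummy-object module is
# star-imported instead, so the same names remain importable but raise an
# informative error once actually used.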
|
diffusers-main
|
src/diffusers/schedulers/__init__.py
|
# Copyright 2022 Stanford University Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import FlaxSchedulerMixin, FlaxSchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999) -> jnp.ndarray:
"""
Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
(1-beta) over time from t = [0,1].
Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
to that part of the diffusion process.
Args:
num_diffusion_timesteps (`int`): the number of betas to produce.
max_beta (`float`): the maximum beta to use; use values lower than 1 to
prevent singularities.
Returns:
betas (`jnp.ndarray`): the betas used by the scheduler to step the model outputs
"""
def alpha_bar(time_step):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2
betas = []
for i in range(num_diffusion_timesteps):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
return jnp.array(betas, dtype=jnp.float32)
@flax.struct.dataclass
class DDIMSchedulerState:
# setable values
timesteps: jnp.ndarray
alphas_cumprod: jnp.ndarray
num_inference_steps: Optional[int] = None
@classmethod
def create(cls, num_train_timesteps: int, alphas_cumprod: jnp.ndarray):
return cls(timesteps=jnp.arange(0, num_train_timesteps)[::-1], alphas_cumprod=alphas_cumprod)
@dataclass
class FlaxDDIMSchedulerOutput(FlaxSchedulerOutput):
state: DDIMSchedulerState
class FlaxDDIMScheduler(FlaxSchedulerMixin, ConfigMixin):
"""
Denoising diffusion implicit models is a scheduler that extends the denoising procedure introduced in denoising
diffusion probabilistic models (DDPMs) with non-Markovian guidance.
[`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
[`~ConfigMixin`] also provides general loading and saving functionality via the [`~ConfigMixin.save_config`] and
[`~ConfigMixin.from_config`] functions.
For more details, see the original paper: https://arxiv.org/abs/2010.02502
Args:
num_train_timesteps (`int`): number of diffusion steps used to train the model.
beta_start (`float`): the starting `beta` value of inference.
beta_end (`float`): the final `beta` value.
beta_schedule (`str`):
the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
`linear`, `scaled_linear`, or `squaredcos_cap_v2`.
trained_betas (`jnp.ndarray`, optional):
option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.
clip_sample (`bool`, default `True`):
option to clip predicted sample between -1 and 1 for numerical stability.
set_alpha_to_one (`bool`, default `True`):
each diffusion step uses the value of alphas product at that step and at the previous one. For the final
step there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,
otherwise it uses the value of alpha at step 0.
steps_offset (`int`, default `0`):
an offset added to the inference steps. You can use a combination of `offset=1` and
`set_alpha_to_one=False`, to make the last step use step 0 for the previous alpha product, as done in
stable diffusion.
"""
@property
def has_state(self):
return True
@register_to_config
def __init__(
self,
num_train_timesteps: int = 1000,
beta_start: float = 0.0001,
beta_end: float = 0.02,
beta_schedule: str = "linear",
set_alpha_to_one: bool = True,
steps_offset: int = 0,
):
if beta_schedule == "linear":
self.betas = jnp.linspace(beta_start, beta_end, num_train_timesteps, dtype=jnp.float32)
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
self.betas = jnp.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=jnp.float32) ** 2
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
self.betas = betas_for_alpha_bar(num_train_timesteps)
else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
self.alphas = 1.0 - self.betas
# HACK for now - clean up later (PVP)
self._alphas_cumprod = jnp.cumprod(self.alphas, axis=0)
# At every step in ddim, we are looking into the previous alphas_cumprod
# For the final step, there is no previous alphas_cumprod because we are already at 0
# `set_alpha_to_one` decides whether we set this parameter simply to one or
# whether we use the final alpha of the "non-previous" one.
self.final_alpha_cumprod = jnp.array(1.0) if set_alpha_to_one else float(self._alphas_cumprod[0])
# standard deviation of the initial noise distribution
self.init_noise_sigma = 1.0
def scale_model_input(
self, state: DDIMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
) -> jnp.ndarray:
"""
Args:
            state (`DDIMSchedulerState`): the `FlaxDDIMScheduler` state data class instance.
sample (`jnp.ndarray`): input sample
timestep (`int`, optional): current timestep
Returns:
`jnp.ndarray`: scaled input sample
"""
return sample
def create_state(self):
return DDIMSchedulerState.create(
num_train_timesteps=self.config.num_train_timesteps, alphas_cumprod=self._alphas_cumprod
)
def _get_variance(self, timestep, prev_timestep, alphas_cumprod):
alpha_prod_t = alphas_cumprod[timestep]
alpha_prod_t_prev = jnp.where(prev_timestep >= 0, alphas_cumprod[prev_timestep], self.final_alpha_cumprod)
beta_prod_t = 1 - alpha_prod_t
beta_prod_t_prev = 1 - alpha_prod_t_prev
variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev)
return variance
def set_timesteps(self, state: DDIMSchedulerState, num_inference_steps: int, shape: Tuple) -> DDIMSchedulerState:
"""
Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference.
Args:
state (`DDIMSchedulerState`):
the `FlaxDDIMScheduler` state data class instance.
num_inference_steps (`int`):
the number of diffusion steps used when generating samples with a pre-trained model.
"""
offset = self.config.steps_offset
step_ratio = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]
timesteps = timesteps + offset
return state.replace(num_inference_steps=num_inference_steps, timesteps=timesteps)
def step(
self,
state: DDIMSchedulerState,
model_output: jnp.ndarray,
timestep: int,
sample: jnp.ndarray,
return_dict: bool = True,
) -> Union[FlaxDDIMSchedulerOutput, Tuple]:
"""
Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
process from the learned model outputs (most often the predicted noise).
Args:
state (`DDIMSchedulerState`): the `FlaxDDIMScheduler` state data class instance.
model_output (`jnp.ndarray`): direct output from learned diffusion model.
timestep (`int`): current discrete timestep in the diffusion chain.
sample (`jnp.ndarray`):
current instance of sample being created by diffusion process.
return_dict (`bool`): option for returning tuple rather than FlaxDDIMSchedulerOutput class
Returns:
[`FlaxDDIMSchedulerOutput`] or `tuple`: [`FlaxDDIMSchedulerOutput`] if `return_dict` is True, otherwise a
`tuple`. When returning a tuple, the first element is the sample tensor.
"""
if state.num_inference_steps is None:
raise ValueError(
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
)
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read DDIM paper in-detail understanding
# Notation (<variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# TODO(Patrick) - eta is always 0.0 for now, allow to be set in step function
eta = 0.0
# 1. get previous step value (=t-1)
prev_timestep = timestep - self.config.num_train_timesteps // state.num_inference_steps
alphas_cumprod = state.alphas_cumprod
# 2. compute alphas, betas
alpha_prod_t = alphas_cumprod[timestep]
alpha_prod_t_prev = jnp.where(prev_timestep >= 0, alphas_cumprod[prev_timestep], self.final_alpha_cumprod)
beta_prod_t = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
# 4. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
variance = self._get_variance(timestep, prev_timestep, alphas_cumprod)
std_dev_t = eta * variance ** (0.5)
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * model_output
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, state)
return FlaxDDIMSchedulerOutput(prev_sample=prev_sample, state=state)
def add_noise(
self,
original_samples: jnp.ndarray,
noise: jnp.ndarray,
timesteps: jnp.ndarray,
) -> jnp.ndarray:
sqrt_alpha_prod = self.alphas_cumprod[timesteps] ** 0.5
sqrt_alpha_prod = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
sqrt_alpha_prod = sqrt_alpha_prod[:, None]
        sqrt_one_minus_alpha_prod = (1 - self.alphas_cumprod[timesteps]) ** 0.5
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod[:, None]
noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def __len__(self):
return self.config.num_train_timesteps
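# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal deterministic DDIM loop showing how the immutable scheduler state is
# threaded through `set_timesteps` and `step`. A constant-zero "model" stands in
# for a trained UNet so the loop is runnable; reading the initial state from
# `scheduler.state` is an assumption about this version of the API.
def _flax_ddim_demo():
    import jax.numpy as jnp
    scheduler = FlaxDDIMScheduler(num_train_timesteps=1000)
    state = scheduler.state  # assumed accessor for the initial DDIMSchedulerState
    state = scheduler.set_timesteps(state, num_inference_steps=10)
    sample = jnp.zeros((1, 3, 8, 8))
    for t in state.timesteps:
        model_output = jnp.zeros_like(sample)  # stand-in for unet(sample, t)
        sample, state = scheduler.step(state, model_output, int(t), sample, return_dict=False)
    return sample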
|
diffusers-main
|
src/diffusers/schedulers/scheduling_ddim_flax.py
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
import torch
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"
@dataclass
class SchedulerOutput(BaseOutput):
"""
Base class for the scheduler's step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
"""
prev_sample: torch.FloatTensor
class SchedulerMixin:
"""
Mixin containing common functions for the schedulers.
"""
config_name = SCHEDULER_CONFIG_NAME
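# --- Small illustration (not part of the original module) ---
# Assuming diffusers' `BaseOutput` semantics, a `SchedulerOutput` can be read
# both as a dataclass and as a tuple, which is what lets a scheduler's `step`
# support attribute-style callers and `return_dict=False` callers alike.
def _scheduler_output_demo():
    out = SchedulerOutput(prev_sample=torch.zeros(1, 3, 8, 8))
    assert out.prev_sample is out[0]  # attribute access and index access agree
    (prev_sample,) = out.to_tuple()   # the tuple form mirrors `return_dict=False`
    return prev_sample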
|
diffusers-main
|
src/diffusers/schedulers/scheduling_utils.py
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"
@dataclass
class FlaxSchedulerOutput(BaseOutput):
"""
Base class for the scheduler's step function output.
Args:
prev_sample (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
"""
prev_sample: jnp.ndarray
class FlaxSchedulerMixin:
"""
Mixin containing common functions for the schedulers.
"""
config_name = SCHEDULER_CONFIG_NAME
|
diffusers-main
|
src/diffusers/schedulers/scheduling_utils_flax.py
|
# Copyright 2022 Google Brain and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
"""
Output class for the ScoreSdeVeScheduler's step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
prev_sample_mean (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Mean averaged `prev_sample`. Same as `prev_sample`, only mean-averaged over previous timesteps.
"""
prev_sample: torch.FloatTensor
prev_sample_mean: torch.FloatTensor
class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
"""
The variance exploding stochastic differential equation (SDE) scheduler.
For more information, see the original paper: https://arxiv.org/abs/2011.13456
[`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
[`~ConfigMixin`] also provides general loading and saving functionality via the [`~ConfigMixin.save_config`] and
[`~ConfigMixin.from_config`] functions.
Args:
num_train_timesteps (`int`): number of diffusion steps used to train the model.
snr (`float`):
coefficient weighting the step from the model_output sample (from the network) to the random noise.
sigma_min (`float`):
initial noise scale for sigma sequence in sampling procedure. The minimum sigma should mirror the
distribution of the data.
sigma_max (`float`): maximum value used for the range of continuous timesteps passed into the model.
sampling_eps (`float`): the end value of sampling, where timesteps decrease progressively from 1 to
epsilon.
correct_steps (`int`): number of correction steps performed on a produced sample.
"""
@register_to_config
def __init__(
self,
num_train_timesteps: int = 2000,
snr: float = 0.15,
sigma_min: float = 0.01,
sigma_max: float = 1348.0,
sampling_eps: float = 1e-5,
correct_steps: int = 1,
):
# standard deviation of the initial noise distribution
self.init_noise_sigma = sigma_max
# setable values
self.timesteps = None
self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)
def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
"""
Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
current timestep.
Args:
sample (`torch.FloatTensor`): input sample
timestep (`int`, optional): current timestep
Returns:
`torch.FloatTensor`: scaled input sample
"""
return sample
def set_timesteps(
self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None
):
"""
Sets the continuous timesteps used for the diffusion chain. Supporting function to be run before inference.
Args:
num_inference_steps (`int`):
the number of diffusion steps used when generating samples with a pre-trained model.
sampling_eps (`float`, optional): final timestep value (overrides value given at Scheduler instantiation).
"""
sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)
def set_sigmas(
self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
):
"""
Sets the noise scales used for the diffusion chain. Supporting function to be run before inference.
        The sigmas control the weight of the `drift` and `diffusion` components of the sample update.
Args:
num_inference_steps (`int`):
the number of diffusion steps used when generating samples with a pre-trained model.
sigma_min (`float`, optional):
initial noise scale value (overrides value given at Scheduler instantiation).
sigma_max (`float`, optional): final noise scale value (overrides value given at Scheduler instantiation).
sampling_eps (`float`, optional): final timestep value (overrides value given at Scheduler instantiation).
"""
sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(num_inference_steps, sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
def get_adjacent_sigma(self, timesteps, t):
return torch.where(
timesteps == 0,
torch.zeros_like(t.to(timesteps.device)),
self.discrete_sigmas[timesteps - 1].to(timesteps.device),
)
def step_pred(
self,
model_output: torch.FloatTensor,
timestep: int,
sample: torch.FloatTensor,
generator: Optional[torch.Generator] = None,
return_dict: bool = True,
) -> Union[SdeVeOutput, Tuple]:
"""
Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
process from the learned model outputs (most often the predicted noise).
Args:
model_output (`torch.FloatTensor`): direct output from learned diffusion model.
timestep (`int`): current discrete timestep in the diffusion chain.
sample (`torch.FloatTensor`):
current instance of sample being created by diffusion process.
generator: random number generator.
return_dict (`bool`): option for returning tuple rather than SchedulerOutput class
Returns:
[`~schedulers.scheduling_sde_ve.SdeVeOutput`] or `tuple`: [`~schedulers.scheduling_sde_ve.SdeVeOutput`] if
`return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor.
"""
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
)
timestep = timestep * torch.ones(
sample.shape[0], device=sample.device
) # torch.repeat_interleave(timestep, sample.shape[0])
timesteps = (timestep * (len(self.timesteps) - 1)).long()
        # mps requires indices to be on the same device as the indexed tensor, so we move them to the device of `discrete_sigmas`
timesteps = timesteps.to(self.discrete_sigmas.device)
sigma = self.discrete_sigmas[timesteps].to(sample.device)
adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
drift = torch.zeros_like(sample)
diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
diffusion = diffusion.flatten()
while len(diffusion.shape) < len(sample.shape):
diffusion = diffusion.unsqueeze(-1)
drift = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
noise = torch.randn(sample.shape, layout=sample.layout, generator=generator).to(sample.device)
prev_sample_mean = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
prev_sample = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)
def step_correct(
self,
model_output: torch.FloatTensor,
sample: torch.FloatTensor,
generator: Optional[torch.Generator] = None,
return_dict: bool = True,
) -> Union[SchedulerOutput, Tuple]:
"""
Correct the predicted sample based on the output model_output of the network. This is often run repeatedly
after making the prediction for the previous timestep.
Args:
model_output (`torch.FloatTensor`): direct output from learned diffusion model.
sample (`torch.FloatTensor`):
current instance of sample being created by diffusion process.
generator: random number generator.
return_dict (`bool`): option for returning tuple rather than SchedulerOutput class
Returns:
[`~schedulers.scheduling_sde_ve.SdeVeOutput`] or `tuple`: [`~schedulers.scheduling_sde_ve.SdeVeOutput`] if
`return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor.
"""
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
)
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
noise = torch.randn(sample.shape, layout=sample.layout, generator=generator).to(sample.device)
# compute step size from the model_output, the noise, and the snr
grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
step_size = step_size.flatten()
while len(step_size.shape) < len(sample.shape):
step_size = step_size.unsqueeze(-1)
prev_sample_mean = sample + step_size * model_output
prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=prev_sample)
def __len__(self):
return self.config.num_train_timesteps
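# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal predictor-corrector loop: each timestep runs `correct_steps`
# corrector updates (`step_correct`) followed by one predictor update
# (`step_pred`). A small random tensor stands in for a trained score model so
# the loop is runnable (an all-zero output would make `grad_norm` zero).
def _sde_ve_demo():
    scheduler = ScoreSdeVeScheduler()
    scheduler.set_timesteps(num_inference_steps=10)
    scheduler.set_sigmas(num_inference_steps=10)
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_output = torch.randn_like(sample) * 1e-3  # stand-in for score_model(sample, t)
        for _ in range(scheduler.config.correct_steps):
            sample = scheduler.step_correct(model_output, sample, return_dict=False)[0]
        sample, sample_mean = scheduler.step_pred(model_output, t, sample, return_dict=False)
    return sample_mean  # the mean is conventionally kept as the final denoised sample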
|
diffusers-main
|
src/diffusers/schedulers/scheduling_sde_ve.py
|
# Copyright 2022 Stanford University Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, deprecate
from .scheduling_utils import SchedulerMixin
@dataclass
class DDIMSchedulerOutput(BaseOutput):
"""
Output class for the scheduler's step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999) -> torch.Tensor:
"""
Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
(1-beta) over time from t = [0,1].
Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
to that part of the diffusion process.
Args:
num_diffusion_timesteps (`int`): the number of betas to produce.
max_beta (`float`): the maximum beta to use; use values lower than 1 to
prevent singularities.
Returns:
        betas (`torch.Tensor`): the betas used by the scheduler to step the model outputs
"""
def alpha_bar(time_step):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2
betas = []
for i in range(num_diffusion_timesteps):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
return torch.tensor(betas)
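# Quick numeric sanity check (illustrative, not part of the original module):
# the cosine schedule produces betas in (0, max_beta] and a strictly decreasing
# cumulative alpha product, which is what the scheduler below relies on.
def _betas_for_alpha_bar_demo():
    betas = betas_for_alpha_bar(10)
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
    assert betas.min() > 0 and betas.max() <= 0.999
    assert torch.all(alphas_cumprod[1:] < alphas_cumprod[:-1])
    return betas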
class DDIMScheduler(SchedulerMixin, ConfigMixin):
"""
Denoising diffusion implicit models is a scheduler that extends the denoising procedure introduced in denoising
diffusion probabilistic models (DDPMs) with non-Markovian guidance.
[`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
[`~ConfigMixin`] also provides general loading and saving functionality via the [`~ConfigMixin.save_config`] and
[`~ConfigMixin.from_config`] functions.
For more details, see the original paper: https://arxiv.org/abs/2010.02502
Args:
num_train_timesteps (`int`): number of diffusion steps used to train the model.
beta_start (`float`): the starting `beta` value of inference.
beta_end (`float`): the final `beta` value.
beta_schedule (`str`):
the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
`linear`, `scaled_linear`, or `squaredcos_cap_v2`.
trained_betas (`np.ndarray`, optional):
option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.
clip_sample (`bool`, default `True`):
option to clip predicted sample between -1 and 1 for numerical stability.
set_alpha_to_one (`bool`, default `True`):
each diffusion step uses the value of alphas product at that step and at the previous one. For the final
step there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,
otherwise it uses the value of alpha at step 0.
steps_offset (`int`, default `0`):
an offset added to the inference steps. You can use a combination of `offset=1` and
`set_alpha_to_one=False`, to make the last step use step 0 for the previous alpha product, as done in
stable diffusion.
"""
@register_to_config
def __init__(
self,
num_train_timesteps: int = 1000,
beta_start: float = 0.0001,
beta_end: float = 0.02,
beta_schedule: str = "linear",
trained_betas: Optional[np.ndarray] = None,
clip_sample: bool = True,
set_alpha_to_one: bool = True,
steps_offset: int = 0,
):
if trained_betas is not None:
self.betas = torch.from_numpy(trained_betas)
elif beta_schedule == "linear":
self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
self.betas = (
torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
self.betas = betas_for_alpha_bar(num_train_timesteps)
else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
self.alphas = 1.0 - self.betas
self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
# At every step in ddim, we are looking into the previous alphas_cumprod
# For the final step, there is no previous alphas_cumprod because we are already at 0
# `set_alpha_to_one` decides whether we set this parameter simply to one or
# whether we use the final alpha of the "non-previous" one.
self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0]
# standard deviation of the initial noise distribution
self.init_noise_sigma = 1.0
# setable values
self.num_inference_steps = None
self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())
def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
"""
Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
current timestep.
Args:
sample (`torch.FloatTensor`): input sample
timestep (`int`, optional): current timestep
Returns:
`torch.FloatTensor`: scaled input sample
"""
return sample
def _get_variance(self, timestep, prev_timestep):
alpha_prod_t = self.alphas_cumprod[timestep]
alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
beta_prod_t = 1 - alpha_prod_t
beta_prod_t_prev = 1 - alpha_prod_t_prev
variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev)
return variance
def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None, **kwargs):
"""
Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference.
Args:
num_inference_steps (`int`):
the number of diffusion steps used when generating samples with a pre-trained model.
"""
deprecated_offset = deprecate(
"offset", "0.7.0", "Please pass `steps_offset` to `__init__` instead.", take_from=kwargs
)
offset = deprecated_offset or self.config.steps_offset
self.num_inference_steps = num_inference_steps
step_ratio = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy()
self.timesteps = torch.from_numpy(timesteps).to(device)
self.timesteps += offset
def step(
self,
model_output: torch.FloatTensor,
timestep: int,
sample: torch.FloatTensor,
eta: float = 0.0,
use_clipped_model_output: bool = False,
generator=None,
return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
"""
Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
process from the learned model outputs (most often the predicted noise).
Args:
model_output (`torch.FloatTensor`): direct output from learned diffusion model.
timestep (`int`): current discrete timestep in the diffusion chain.
sample (`torch.FloatTensor`):
current instance of sample being created by diffusion process.
eta (`float`): weight of noise for added noise in diffusion step.
            use_clipped_model_output (`bool`): if `True`, `model_output` is re-derived from the clipped
                predicted original sample, as is done in GLIDE.
generator: random number generator.
return_dict (`bool`): option for returning tuple rather than DDIMSchedulerOutput class
Returns:
[`~schedulers.scheduling_utils.DDIMSchedulerOutput`] or `tuple`:
[`~schedulers.scheduling_utils.DDIMSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When
returning a tuple, the first element is the sample tensor.
"""
if self.num_inference_steps is None:
raise ValueError(
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
)
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
        # Ideally, read the DDIM paper for an in-depth understanding
        # Notation (<variable name> -> <name in paper>)
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
alpha_prod_t = self.alphas_cumprod[timestep]
alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
beta_prod_t = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
# 4. Clip "predicted x_0"
if self.config.clip_sample:
pred_original_sample = torch.clamp(pred_original_sample, -1, 1)
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
variance = self._get_variance(timestep, prev_timestep)
std_dev_t = eta * variance ** (0.5)
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
model_output = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5)
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
device = model_output.device if torch.is_tensor(model_output) else "cpu"
noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
variance = self._get_variance(timestep, prev_timestep) ** (0.5) * eta * noise
prev_sample = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def add_noise(
self,
original_samples: torch.FloatTensor,
noise: torch.FloatTensor,
timesteps: torch.IntTensor,
) -> torch.FloatTensor:
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
timesteps = timesteps.to(original_samples.device)
sqrt_alpha_prod = self.alphas_cumprod[timesteps] ** 0.5
sqrt_alpha_prod = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
sqrt_one_minus_alpha_prod = (1 - self.alphas_cumprod[timesteps]) ** 0.5
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def __len__(self):
return self.config.num_train_timesteps
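# --- Usage sketch (illustrative, not part of the original module) ---
# A deterministic DDIM loop (eta=0.0 adds no noise, so no generator is needed).
# `steps_offset=1` with `set_alpha_to_one=False` mirrors the stable diffusion
# configuration mentioned in the class docstring; a constant-zero "model"
# stands in for a trained UNet so the loop is runnable.
def _ddim_demo():
    scheduler = DDIMScheduler(num_train_timesteps=1000, steps_offset=1, set_alpha_to_one=False)
    scheduler.set_timesteps(num_inference_steps=50)
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_output = torch.zeros_like(sample)  # stand-in for unet(sample, t)
        sample = scheduler.step(model_output, t, sample, eta=0.0).prev_sample
    return sample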
|
diffusers-main
|
src/diffusers/schedulers/scheduling_ddim.py
|
# Copyright 2022 Zhejiang University Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
import math
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import deprecate
from .scheduling_utils import SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999):
"""
Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
(1-beta) over time from t = [0,1].
Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
to that part of the diffusion process.
Args:
num_diffusion_timesteps (`int`): the number of betas to produce.
max_beta (`float`): the maximum beta to use; use values lower than 1 to
prevent singularities.
Returns:
        betas (`torch.Tensor`): the betas used by the scheduler to step the model outputs
"""
def alpha_bar(time_step):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2
betas = []
for i in range(num_diffusion_timesteps):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
return torch.tensor(betas, dtype=torch.float32)
class PNDMScheduler(SchedulerMixin, ConfigMixin):
"""
Pseudo numerical methods for diffusion models (PNDM) proposes using more advanced ODE integration techniques,
namely Runge-Kutta method and a linear multi-step method.
[`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
[`~ConfigMixin`] also provides general loading and saving functionality via the [`~ConfigMixin.save_config`] and
[`~ConfigMixin.from_config`] functions.
For more details, see the original paper: https://arxiv.org/abs/2202.09778
Args:
num_train_timesteps (`int`): number of diffusion steps used to train the model.
beta_start (`float`): the starting `beta` value of inference.
beta_end (`float`): the final `beta` value.
beta_schedule (`str`):
the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
`linear`, `scaled_linear`, or `squaredcos_cap_v2`.
trained_betas (`np.ndarray`, optional):
option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.
skip_prk_steps (`bool`):
allows the scheduler to skip the Runge-Kutta steps that are defined in the original paper as being required
before plms steps; defaults to `False`.
set_alpha_to_one (`bool`, default `False`):
each diffusion step uses the value of alphas product at that step and at the previous one. For the final
step there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,
otherwise it uses the value of alpha at step 0.
steps_offset (`int`, default `0`):
an offset added to the inference steps. You can use a combination of `offset=1` and
`set_alpha_to_one=False`, to make the last step use step 0 for the previous alpha product, as done in
stable diffusion.
"""
@register_to_config
def __init__(
self,
num_train_timesteps: int = 1000,
beta_start: float = 0.0001,
beta_end: float = 0.02,
beta_schedule: str = "linear",
trained_betas: Optional[np.ndarray] = None,
skip_prk_steps: bool = False,
set_alpha_to_one: bool = False,
steps_offset: int = 0,
):
if trained_betas is not None:
self.betas = torch.from_numpy(trained_betas)
elif beta_schedule == "linear":
self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
self.betas = (
torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
self.betas = betas_for_alpha_bar(num_train_timesteps)
else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
self.alphas = 1.0 - self.betas
self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0]
# standard deviation of the initial noise distribution
self.init_noise_sigma = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
self.pndm_order = 4
# running values
self.cur_model_output = 0
self.counter = 0
self.cur_sample = None
self.ets = []
# setable values
self.num_inference_steps = None
self._timesteps = np.arange(0, num_train_timesteps)[::-1].copy()
self.prk_timesteps = None
self.plms_timesteps = None
self.timesteps = None
def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None, **kwargs):
"""
Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference.
Args:
num_inference_steps (`int`):
the number of diffusion steps used when generating samples with a pre-trained model.
"""
deprecated_offset = deprecate(
"offset", "0.7.0", "Please pass `steps_offset` to `__init__` instead.", take_from=kwargs
)
offset = deprecated_offset or self.config.steps_offset
self.num_inference_steps = num_inference_steps
step_ratio = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
self._timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()
self._timesteps += offset
if self.config.skip_prk_steps:
# for some models like stable diffusion the prk steps can/should be skipped to
# produce better results. When using PNDM with `self.config.skip_prk_steps` the implementation
# is based on crowsonkb's PLMS sampler implementation: https://github.com/CompVis/latent-diffusion/pull/51
self.prk_timesteps = np.array([])
self.plms_timesteps = np.concatenate([self._timesteps[:-1], self._timesteps[-2:-1], self._timesteps[-1:]])[
::-1
].copy()
else:
prk_timesteps = np.array(self._timesteps[-self.pndm_order :]).repeat(2) + np.tile(
np.array([0, self.config.num_train_timesteps // num_inference_steps // 2]), self.pndm_order
)
self.prk_timesteps = (prk_timesteps[:-1].repeat(2)[1:-1])[::-1].copy()
self.plms_timesteps = self._timesteps[:-3][
::-1
].copy() # we copy to avoid having negative strides which are not supported by torch.from_numpy
timesteps = np.concatenate([self.prk_timesteps, self.plms_timesteps]).astype(np.int64)
self.timesteps = torch.from_numpy(timesteps).to(device)
self.ets = []
self.counter = 0
def step(
self,
model_output: torch.FloatTensor,
timestep: int,
sample: torch.FloatTensor,
return_dict: bool = True,
) -> Union[SchedulerOutput, Tuple]:
"""
Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
process from the learned model outputs (most often the predicted noise).
This function calls `step_prk()` or `step_plms()` depending on the internal variable `counter`.
Args:
model_output (`torch.FloatTensor`): direct output from learned diffusion model.
timestep (`int`): current discrete timestep in the diffusion chain.
sample (`torch.FloatTensor`):
current instance of sample being created by diffusion process.
return_dict (`bool`): option for returning tuple rather than SchedulerOutput class
Returns:
[`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`:
[`~schedulers.scheduling_utils.SchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When
returning a tuple, the first element is the sample tensor.
"""
if self.counter < len(self.prk_timesteps) and not self.config.skip_prk_steps:
return self.step_prk(model_output=model_output, timestep=timestep, sample=sample, return_dict=return_dict)
else:
return self.step_plms(model_output=model_output, timestep=timestep, sample=sample, return_dict=return_dict)
def step_prk(
self,
model_output: torch.FloatTensor,
timestep: int,
sample: torch.FloatTensor,
return_dict: bool = True,
) -> Union[SchedulerOutput, Tuple]:
"""
Step function propagating the sample with the Runge-Kutta method. RK takes 4 forward passes to approximate the
solution to the differential equation.
Args:
model_output (`torch.FloatTensor`): direct output from learned diffusion model.
timestep (`int`): current discrete timestep in the diffusion chain.
sample (`torch.FloatTensor`):
current instance of sample being created by diffusion process.
return_dict (`bool`): option for returning tuple rather than SchedulerOutput class
Returns:
[`~scheduling_utils.SchedulerOutput`] or `tuple`: [`~scheduling_utils.SchedulerOutput`] if `return_dict` is
True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor.
"""
if self.num_inference_steps is None:
raise ValueError(
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
)
diff_to_prev = 0 if self.counter % 2 else self.config.num_train_timesteps // self.num_inference_steps // 2
prev_timestep = timestep - diff_to_prev
timestep = self.prk_timesteps[self.counter // 4 * 4]
if self.counter % 4 == 0:
self.cur_model_output += 1 / 6 * model_output
self.ets.append(model_output)
self.cur_sample = sample
elif (self.counter - 1) % 4 == 0:
self.cur_model_output += 1 / 3 * model_output
elif (self.counter - 2) % 4 == 0:
self.cur_model_output += 1 / 3 * model_output
elif (self.counter - 3) % 4 == 0:
model_output = self.cur_model_output + 1 / 6 * model_output
self.cur_model_output = 0
# cur_sample should not be `None`
cur_sample = self.cur_sample if self.cur_sample is not None else sample
prev_sample = self._get_prev_sample(cur_sample, timestep, prev_timestep, model_output)
self.counter += 1
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=prev_sample)
def step_plms(
self,
model_output: torch.FloatTensor,
timestep: int,
sample: torch.FloatTensor,
return_dict: bool = True,
) -> Union[SchedulerOutput, Tuple]:
"""
        Step function propagating the sample with the linear multi-step method. This requires only one forward pass
        per step and reuses stored model outputs from previous timesteps to approximate the solution.
Args:
model_output (`torch.FloatTensor`): direct output from learned diffusion model.
timestep (`int`): current discrete timestep in the diffusion chain.
sample (`torch.FloatTensor`):
current instance of sample being created by diffusion process.
return_dict (`bool`): option for returning tuple rather than SchedulerOutput class
Returns:
[`~scheduling_utils.SchedulerOutput`] or `tuple`: [`~scheduling_utils.SchedulerOutput`] if `return_dict` is
True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor.
"""
if self.num_inference_steps is None:
raise ValueError(
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
)
if not self.config.skip_prk_steps and len(self.ets) < 3:
raise ValueError(
f"{self.__class__} can only be run AFTER scheduler has been run "
"in 'prk' mode for at least 12 iterations "
"See: https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/pipeline_pndm.py "
"for more information."
)
prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
if self.counter != 1:
self.ets.append(model_output)
else:
prev_timestep = timestep
timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps
if len(self.ets) == 1 and self.counter == 0:
model_output = model_output
self.cur_sample = sample
elif len(self.ets) == 1 and self.counter == 1:
model_output = (model_output + self.ets[-1]) / 2
sample = self.cur_sample
self.cur_sample = None
elif len(self.ets) == 2:
model_output = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets) == 3:
model_output = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
model_output = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
prev_sample = self._get_prev_sample(sample, timestep, prev_timestep, model_output)
self.counter += 1
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=prev_sample)
def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
"""
Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
current timestep.
Args:
sample (`torch.FloatTensor`): input sample
Returns:
`torch.FloatTensor`: scaled input sample
"""
return sample
def _get_prev_sample(self, sample, timestep, prev_timestep, model_output):
# See formula (9) of PNDM paper https://arxiv.org/pdf/2202.09778.pdf
# this function computes x_(t−δ) using the formula of (9)
# Note that x_t needs to be added to both sides of the equation
# Notation (<variable name> -> <name in paper>
# alpha_prod_t -> α_t
# alpha_prod_t_prev -> α_(t−δ)
# beta_prod_t -> (1 - α_t)
# beta_prod_t_prev -> (1 - α_(t−δ))
# sample -> x_t
# model_output -> e_θ(x_t, t)
# prev_sample -> x_(t−δ)
alpha_prod_t = self.alphas_cumprod[timestep]
alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
beta_prod_t = 1 - alpha_prod_t
beta_prod_t_prev = 1 - alpha_prod_t_prev
# corresponds to (α_(t−δ) - α_t) divided by
# denominator of x_t in formula (9) and plus 1
        # Note: (α_(t−δ) - α_t) / (sqrt(α_t) * (sqrt(α_(t−δ)) + sqrt(α_t))) + 1 =
        # sqrt(α_(t−δ)) / sqrt(α_t)
sample_coeff = (alpha_prod_t_prev / alpha_prod_t) ** (0.5)
# corresponds to denominator of e_θ(x_t, t) in formula (9)
model_output_denom_coeff = alpha_prod_t * beta_prod_t_prev ** (0.5) + (
alpha_prod_t * beta_prod_t * alpha_prod_t_prev
) ** (0.5)
# full formula (9)
prev_sample = (
sample_coeff * sample - (alpha_prod_t_prev - alpha_prod_t) * model_output / model_output_denom_coeff
)
return prev_sample
def add_noise(
self,
original_samples: torch.FloatTensor,
noise: torch.FloatTensor,
timesteps: torch.IntTensor,
) -> torch.Tensor:
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
timesteps = timesteps.to(original_samples.device)
sqrt_alpha_prod = self.alphas_cumprod[timesteps] ** 0.5
sqrt_alpha_prod = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
sqrt_one_minus_alpha_prod = (1 - self.alphas_cumprod[timesteps]) ** 0.5
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def __len__(self):
return self.config.num_train_timesteps
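# --- Usage sketch (illustrative, not part of the original module) ---
# The PLMS variant (`skip_prk_steps=True`, as used for stable diffusion): the
# scheduler warms up its `ets` history over the first few steps, then applies
# the fourth-order linear multi-step formula. A constant-zero "model" stands in
# for a trained UNet so the loop is runnable.
def _pndm_demo():
    scheduler = PNDMScheduler(num_train_timesteps=1000, skip_prk_steps=True)
    scheduler.set_timesteps(num_inference_steps=50)
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_output = torch.zeros_like(sample)  # stand-in for unet(sample, t)
        sample = scheduler.step(model_output, t, sample).prev_sample
    return sample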
|
diffusers-main
|
src/diffusers/schedulers/scheduling_pndm.py
|
# coding=utf-8
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Copyright 2021 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import re
import collections
import string
import json
import math
import logging
from tqdm import tqdm
from transformer.tokenization import BasicTokenizer, whitespace_tokenize
logger = logging.getLogger()
RawResult = collections.namedtuple("RawResult", ["unique_id", "start_logits", "end_logits"])
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
if not s: return []
return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
gold_toks = get_tokens(a_gold)
pred_toks = get_tokens(a_pred)
common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
num_same = sum(common.values())
if len(gold_toks) == 0 or len(pred_toks) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks)
if num_same == 0:
return 0
precision = 1.0 * num_same / len(pred_toks)
recall = 1.0 * num_same / len(gold_toks)
f1 = (2 * precision * recall) / (precision + recall)
return f1
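# Worked example (illustrative): gold "the cat sat" vs. prediction "cat sat down".
# After normalization the gold tokens are ["cat", "sat"] ("the" is stripped as an
# article), so num_same = 2, precision = 2/3, recall = 2/2 = 1, and F1 = 0.8.
def _compute_f1_demo():
    assert compute_exact("the cat sat", "cat sat down") == 0
    f1 = compute_f1("the cat sat", "cat sat down")
    assert abs(f1 - 0.8) < 1e-9
    return f1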
def exact_match_score(prediction, ground_truth):
return (normalize_answer(prediction) == normalize_answer(ground_truth))
def f1_score(prediction, ground_truth):
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = collections.Counter(prediction_tokens) & collections.Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def evaluate(dataset, predictions):
    f1 = exact_match = total = 0
for article in dataset:
for paragraph in article['paragraphs']:
for qa in paragraph['qas']:
total += 1
if qa['id'] not in predictions:
message = 'Unanswered question ' + qa['id'] + \
' will receive score 0.'
print(message, file=sys.stderr)
continue
ground_truths = list(map(lambda x: x['text'], qa['answers']))
prediction = predictions[qa['id']]
exact_match += metric_max_over_ground_truths(
exact_match_score, prediction, ground_truths)
f1 += metric_max_over_ground_truths(
f1_score, prediction, ground_truths)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return {'exact_match': exact_match, 'f1': f1}
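# Toy end-to-end example (illustrative data in the SQuAD v1.1 layout): a single
# correctly answered question scores 100.0 on both metrics.
def _evaluate_demo():
    dataset = [{"paragraphs": [{"qas": [{"id": "q1", "answers": [{"text": "Paris"}]}]}]}]
    predictions = {"q1": "Paris"}
    return evaluate(dataset, predictions)  # {'exact_match': 100.0, 'f1': 100.0}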
# methods for SQuAD2.0 evaluation
def make_qid_to_has_ans(dataset):
qid_to_has_ans = {}
for article in dataset:
for p in article['paragraphs']:
for qa in p['qas']:
qid_to_has_ans[qa['id']] = bool(qa['answers'])
return qid_to_has_ans
def get_raw_scores(dataset, preds):
exact_scores = {}
f1_scores = {}
for article in dataset:
for p in article['paragraphs']:
for qa in p['qas']:
qid = qa['id']
gold_answers = [a['text'] for a in qa['answers'] if normalize_answer(a['text'])]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
gold_answers = ['']
if qid not in preds:
print('Missing prediction for %s' % qid)
continue
a_pred = preds[qid]
# Take max over all gold answers
exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
new_scores = {}
for qid, s in scores.items():
pred_na = na_probs[qid] > na_prob_thresh
if pred_na:
new_scores[qid] = float(not qid_to_has_ans[qid])
else:
new_scores[qid] = s
return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
if not qid_list:
total = len(exact_scores)
return collections.OrderedDict([
('exact_match', 100.0 * sum(exact_scores.values()) / total),
('f1', 100.0 * sum(f1_scores.values()) / total),
('total', total),
])
else:
total = len(qid_list)
return collections.OrderedDict([
('exact_match', 100.0 * sum(exact_scores[k] for k in qid_list) / total),
('f1', 100.0 * sum(f1_scores[k] for k in qid_list) / total),
('total', total),
])
def evaluate_v2(dataset, predictions):
na_probs = {k: 0.0 for k in predictions}
qid_to_has_ans = make_qid_to_has_ans(dataset) # maps qid to True/False
has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
exact_raw, f1_raw = get_raw_scores(dataset, predictions)
exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, 1.0)
f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, 1.0)
out_eval = make_eval_dict(exact_thresh, f1_thresh)
return out_eval
class SquadExample(object):
"""
A single training/test example for the Squad dataset.
For examples without an answer, the start and end position are -1.
"""
def __init__(self,
qas_id,
question_text,
doc_tokens,
orig_answer_text=None,
start_position=None,
end_position=None,
is_impossible=None):
self.qas_id = qas_id
self.question_text = question_text
self.doc_tokens = doc_tokens
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def __str__(self):
return self.__repr__()
def __repr__(self):
s = ""
s += "qas_id: %s" % (self.qas_id)
s += ", question_text: %s" % (
self.question_text)
s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
if self.start_position:
s += ", start_position: %d" % (self.start_position)
if self.end_position:
s += ", end_position: %d" % (self.end_position)
if self.is_impossible:
s += ", is_impossible: %r" % (self.is_impossible)
return s
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
unique_id,
example_index,
doc_span_index,
tokens,
token_to_orig_map,
token_is_max_context,
input_ids,
input_mask,
segment_ids,
start_position=None,
end_position=None,
is_impossible=None):
self.unique_id = unique_id
self.example_index = example_index
self.doc_span_index = doc_span_index
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was John Smith born?
# Context: The leader was John Smith (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
    # Question: What country is the top exporter of electronics?
    # Context: The Japanese electronics industry is the largest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
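# Worked example (illustrative) for the comment above: with a toy whitespace
# tokenizer, the loose span "( 1895 - 1943 ) ." is tightened to just "1895".
class _ToyWhitespaceTokenizer(object):
    def tokenize(self, text):
        return text.split()
def _improve_answer_span_demo():
    doc_tokens = ["(", "1895", "-", "1943", ")", "."]
    start, end = _improve_answer_span(doc_tokens, 0, 5, _ToyWhitespaceTokenizer(), "1895")
    assert (start, end) == (1, 1)
    return start, end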
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
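# Worked example (illustrative) reproducing the comment above: for the token
# "bought" (position 7), span B scores min(4, 0) + 0.05 = 0.05 while span C
# scores min(1, 3) + 0.05 = 1.05, so span C provides the maximum context.
def _check_is_max_context_demo():
    _DocSpan = collections.namedtuple("DocSpan", ["start", "length"])
    doc_spans = [
        _DocSpan(start=0, length=5),  # Span A: the man went to the
        _DocSpan(start=3, length=5),  # Span B: to the store and bought
        _DocSpan(start=6, length=5),  # Span C: and bought a gallon of
    ]
    assert not _check_is_max_context(doc_spans, 1, 7)  # span B is not max context
    assert _check_is_max_context(doc_spans, 2, 7)      # span C is max context
    return True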
def convert_examples_to_features(examples, tokenizer, max_seq_length,
doc_stride, max_query_length, is_training):
"""Loads a data file into a list of `InputBatch`s."""
unique_id = 1000000000
features = []
for (example_index, example) in tqdm(enumerate(examples)):
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
if is_training and example.is_impossible:
tok_start_position = -1
tok_end_position = -1
if is_training and not example.is_impossible:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
example.orig_answer_text)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in query_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
start_position = None
end_position = None
if is_training and not example.is_impossible:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start and
tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
start_position = 0
end_position = 0
else:
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if is_training and example.is_impossible:
start_position = 0
end_position = 0
if example_index < 1:
logger.info("*** Example ***")
logger.info("unique_id: %s" % (unique_id))
logger.info("example_index: %s" % (example_index))
logger.info("doc_span_index: %s" % (doc_span_index))
logger.info("tokens: %s" % " ".join(tokens))
logger.info("token_to_orig_map: %s" % " ".join([
"%d:%d" % (x, y) for (x, y) in token_to_orig_map.items()]))
logger.info("token_is_max_context: %s" % " ".join([
"%d:%s" % (x, y) for (x, y) in token_is_max_context.items()
]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info(
"input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
if is_training and example.is_impossible:
logger.info("impossible example")
if is_training and not example.is_impossible:
answer_text = " ".join(tokens[start_position:(end_position + 1)])
logger.info("start_position: %d" % (start_position))
logger.info("end_position: %d" % (end_position))
logger.info(
"answer: %s" % (answer_text))
features.append(
InputFeatures(
unique_id=unique_id,
example_index=example_index,
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
start_position=start_position,
end_position=end_position,
is_impossible=example.is_impossible))
unique_id += 1
return features
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
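# Illustrative example: _get_best_indexes([0.1, 2.3, -0.5, 1.7], n_best_size=2)
# sorts the indexes by score (1: 2.3, 3: 1.7, 0: 0.1, 2: -0.5) and returns [1, 3].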
def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
# now `orig_text` contains the span of our original text corresponding to the
# span that we predicted.
#
# However, `orig_text` may contain extra characters that we don't want in
# our prediction.
#
# For example, let's say:
# pred_text = steve smith
# orig_text = Steve Smith's
#
# We don't want to return `orig_text` because it contains the extra "'s".
#
# We don't want to return `pred_text` because it's already been normalized
# (the SQuAD eval script also does punctuation stripping/lower casing but
# our tokenizer does additional normalization like stripping accent
# characters).
#
# What we really want to return is "Steve Smith".
#
# Therefore, we have to apply a semi-complicated alignment heuristic between
# `pred_text` and `orig_text` to get a character-to-character alignment. This
# can fail in certain cases in which case we just return `orig_text`.
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return (ns_text, ns_to_s_map)
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
if verbose_logging:
logger.info(
"Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
if verbose_logging:
logger.info("Length not equal after stripping spaces: '%s' vs '%s'",
orig_ns_text, tok_ns_text)
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in tok_ns_to_s_map.items():
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
if verbose_logging:
logger.info("Couldn't map start position")
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
if verbose_logging:
logger.info("Couldn't map end position")
return orig_text
output_text = orig_text[orig_start_position:(orig_end_position + 1)]
return output_text
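# Illustrative example, following the "Steve Smith" case described above:
# get_final_text("steve smith", "Steve Smith's", do_lower_case=True) aligns the two
# strings character by character and returns "Steve Smith", dropping the trailing
# "'s" while restoring the original casing.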
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
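# Worked example (illustrative): _compute_softmax([1.0, 2.0]) subtracts the max
# score 2.0 for numerical stability, then returns
# [exp(-1.0) / (exp(-1.0) + exp(0.0)), exp(0.0) / (exp(-1.0) + exp(0.0))]
# ≈ [0.269, 0.731].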
def write_predictions(all_examples, all_features, all_results, n_best_size,
max_answer_length, do_lower_case, verbose_logging,
version_2_with_negative, null_score_diff_threshold, dev_dataset):
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
score_null = 1000000 # large and positive
min_null_feature_index = 0 # the paragraph slice with min null score
null_start_logit = 0 # the start logit at the slice with min null score
null_end_logit = 0 # the end logit at the slice with min null score
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
# if we could have irrelevant answers, get the min score of irrelevant
if version_2_with_negative:
feature_null_score = result.start_logits[0] + result.end_logits[0]
if feature_null_score < score_null:
score_null = feature_null_score
min_null_feature_index = feature_index
null_start_logit = result.start_logits[0]
null_end_logit = result.end_logits[0]
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index]))
if version_2_with_negative:
prelim_predictions.append(
_PrelimPrediction(
feature_index=min_null_feature_index,
start_index=0,
end_index=0,
start_logit=null_start_logit,
end_logit=null_end_logit))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
if pred.start_index > 0: # this is a non-null prediction
tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
else:
final_text = ""
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit))
# if we didn't include the empty option in the n-best, include it
if version_2_with_negative:
if "" not in seen_predictions:
nbest.append(
_NbestPrediction(
text="",
start_logit=null_start_logit,
end_logit=null_end_logit))
# In very rare edge cases we could have only a single null prediction.
# So we just create a nonce prediction in this case to avoid failure.
if len(nbest) == 1:
nbest.insert(0,
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry:
if entry.text:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_json.append(output)
assert len(nbest_json) >= 1
if not version_2_with_negative:
all_predictions[example.qas_id] = nbest_json[0]["text"]
else:
# predict "" iff the null score - the score of best non-null > threshold
score_diff = score_null - best_non_null_entry.start_logit - (
best_non_null_entry.end_logit)
scores_diff_json[example.qas_id] = score_diff
if score_diff > null_score_diff_threshold:
all_predictions[example.qas_id] = ""
else:
all_predictions[example.qas_id] = best_non_null_entry.text
all_nbest_json[example.qas_id] = nbest_json
if version_2_with_negative:
result = evaluate_v2(dev_dataset, all_predictions)
else:
result = evaluate(dev_dataset, all_predictions)
return result
def read_squad_examples(input_file, is_training, version_2_with_negative):
"""Read a SQuAD json file into a list of SquadExample."""
with open(input_file, "r", encoding='utf-8') as reader:
input_data = json.load(reader)["data"]
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
examples = []
cnt = 500000  # hard cap on the number of examples to load
for entry in input_data:
for paragraph in entry["paragraphs"]:
paragraph_text = paragraph["context"]
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in paragraph_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
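# Illustrative example: for paragraph_text == "Hi  there", the loop above yields
# doc_tokens == ["Hi", "there"] and char_to_word_offset == [0, 0, 0, 0, 1, 1, 1, 1, 1],
# so any character-level answer offset can be mapped back to a whitespace token.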
for qa in paragraph["qas"]:
qas_id = qa["id"]
question_text = qa["question"]
start_position = None
end_position = None
orig_answer_text = None
is_impossible = False
if is_training:
if version_2_with_negative:
if 'is_impossible' not in qa:
qa['is_impossible'] = True
is_impossible = qa["is_impossible"]
if (len(qa["answers"]) != 1) and (not is_impossible):
raise ValueError(
"For training, each question should have exactly 1 answer.")
if not is_impossible:
answer = qa["answers"][0]
orig_answer_text = answer["text"]
answer_offset = answer["answer_start"]
answer_length = len(orig_answer_text)
start_position = char_to_word_offset[answer_offset]
end_position = char_to_word_offset[answer_offset + answer_length - 1]
# Only add answers where the text can be exactly recovered from the
# document. If this CAN'T happen it's likely due to weird Unicode
# stuff so we will just skip the example.
#
# Note that this means for training mode, every example is NOT
# guaranteed to be preserved.
actual_text = " ".join(doc_tokens[start_position:(end_position + 1)])
cleaned_answer_text = " ".join(
whitespace_tokenize(orig_answer_text))
if actual_text.find(cleaned_answer_text) == -1:
logger.warning("Could not find answer: '%s' vs. '%s'",
actual_text, cleaned_answer_text)
continue
else:
start_position = -1
end_position = -1
orig_answer_text = ""
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
doc_tokens=doc_tokens,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
is_impossible=is_impossible)
examples.append(example)
if len(examples) > cnt:
break
if len(examples) > cnt:
break
if len(examples) > cnt:
break
logger.info('load {} examples!'.format(len(examples)))
return input_data, examples
|
bit-main
|
utils_squad.py
|
# coding=utf-8
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# 2022.09.25 - Add distill_attn argument for removing attention distillation
# Meta Platforms, Inc. <[email protected]>
#
# Copyright 2021 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import logging
from transformer.file_utils import WEIGHTS_NAME, CONFIG_NAME, MISC_NAME
from tqdm import tqdm, trange
from torch.nn import CrossEntropyLoss, MSELoss
from transformer.optimization import BertAdam
from helper import *
from utils_glue import *
import numpy as np
import pickle
logging.basicConfig(level=logging.INFO)
class KDLearner(object):
def __init__(self, args, device, student_model, teacher_model=None, num_train_optimization_steps=None):
self.args = args
self.device = device
self.n_gpu = torch.cuda.device_count()
self.student_model = student_model
self.teacher_model = teacher_model
self.num_train_optimization_steps = num_train_optimization_steps
self._check_params()
def build(self, lr=None):
self.prev_global_step = 0
if (self.args.distill_rep or self.args.distill_attn) and not self.args.distill_logit:
stage = 'kd_stage1'
elif self.args.distill_logit and not (self.args.distill_rep or self.args.distill_attn):
stage = 'kd_stage2'
elif self.args.distill_logit and (self.args.distill_rep or self.args.distill_attn):
stage = 'kd_joint'
else:
stage = 'nokd'
self.output_dir = os.path.join(self.args.output_dir, stage)
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
param_optimizer = list(self.student_model.named_parameters())
self.clip_params = {}
for k, v in param_optimizer:
if 'clip_' in k:
self.clip_params[k] = v
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if (not any(nd in n for nd in no_decay) and not 'clip_' in n)], 'weight_decay': self.args.weight_decay},
{'params': [p for n, p in param_optimizer if (any(nd in n for nd in no_decay) and not 'clip_' in n)], 'weight_decay': 0.0},
{'params': [p for n, p in self.clip_params.items()], 'lr': self.args.clip_lr, 'weight_decay': self.args.clip_wd},
]
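# Reading of the three groups above (illustrative): regular weights use the global
# --weight_decay, bias/LayerNorm parameters use no decay, and the learnable clipping
# values (names containing 'clip_') get their own --clip_lr and --clip_wd.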
schedule = 'warmup_linear'
learning_rate = self.args.learning_rate if not lr else lr
self.optimizer = BertAdam(optimizer_grouped_parameters,
schedule=schedule,
lr=learning_rate,
warmup=self.args.warmup_proportion,
t_total=self.num_train_optimization_steps)
logging.info("Optimizer prepared.")
self._check_quantized_modules()
self._setup_grad_scale_stats()
def _do_eval(self, model, task_name, eval_dataloader, output_mode, eval_labels, num_labels):
eval_loss = 0
nb_eval_steps = 0
preds = []
for batch_ in tqdm(eval_dataloader, desc="Evaluating"):
batch_ = tuple(t.to(self.device) for t in batch_)
with torch.no_grad():
input_ids, input_mask, segment_ids, label_ids, seq_lengths = batch_
logits, _, _ = model(input_ids, segment_ids, input_mask)
# create eval loss and other metric required by the task
if output_mode == "classification":
loss_fct = CrossEntropyLoss()
tmp_eval_loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))
elif output_mode == "regression":
loss_fct = MSELoss()
tmp_eval_loss = loss_fct(logits.view(-1), label_ids.view(-1))
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if len(preds) == 0:
preds.append(logits.detach().cpu().numpy())
else:
preds[0] = np.append(
preds[0], logits.detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
preds = preds[0]
if output_mode == "classification":
preds = np.argmax(preds, axis=1)
elif output_mode == "regression":
preds = np.squeeze(preds)
result = compute_metrics(task_name, preds, eval_labels.numpy())
result['eval_loss'] = eval_loss
return result
def evaluate(self, task_name, eval_dataloader, output_mode, eval_labels, num_labels, eval_examples,
mm_eval_dataloader, mm_eval_labels):
""" Evalutaion of checkpoints from models/. directly use args.student_model """
self.student_model.eval()
result = self._do_eval(self.student_model, task_name, eval_dataloader, output_mode, eval_labels, num_labels)
logging.info("***** Running evaluation, Task: %s, Job_id: %s *****" % (self.args.task_name, self.args.job_id))
logging.info(" Num examples = %d", len(eval_examples))
logging.info(" Batch size = %d", self.args.batch_size)
logging.info("***** Eval results, Task: %s, Job_id: %s *****" % (self.args.task_name, self.args.job_id))
for key in sorted(result.keys()):
logging.info(" %s = %s", key, str(result[key]))
if task_name == "mnli":
logging.info('MNLI-mm Evaluation')
result = self._do_eval(self.student_model, 'mnli-mm', mm_eval_dataloader, output_mode, mm_eval_labels, num_labels)
tmp_output_eval_file = os.path.join(self.args.output_dir + '-MM', "eval_results.txt")
result_to_file(result, tmp_output_eval_file)
def train(self, train_examples, task_name, output_mode, eval_labels, num_labels,
train_dataloader, eval_dataloader, eval_examples, tokenizer, mm_eval_labels, mm_eval_dataloader):
""" quant-aware pretraining + KD """
loss_mse = MSELoss()
self.teacher_model.eval()
teacher_results = self._do_eval(self.teacher_model, task_name, eval_dataloader, output_mode, eval_labels, num_labels)
logging.info("Teacher network evaluation")
for key in sorted(teacher_results.keys()):
logging.info(" %s = %s", key, str(teacher_results[key]))
self.teacher_model.train()
global_step = self.prev_global_step
best_dev_acc = 0.0
output_eval_file = os.path.join(self.args.output_dir, "eval_results.txt")
logging.info("***** Running training, Task: %s, Job id: %s*****" % (self.args.task_name, self.args.job_id))
logging.info(" Distill rep: %d, Distill attn: %d, Distill logit: %d" % (self.args.distill_rep, self.args.distill_attn, self.args.distill_logit))
logging.info(" Num examples = %d", len(train_examples))
logging.info(" Batch size = %d", self.args.batch_size)
logging.info(" Num steps = %d", self.num_train_optimization_steps)
global_tr_loss = 0
for epoch_ in range(self.args.num_train_epochs):
tr_loss = 0.
tr_att_loss = 0.
tr_rep_loss = 0.
tr_cls_loss = 0.
nb_tr_examples, nb_tr_steps = 0, 0
for step, batch in enumerate(train_dataloader):
self.student_model.train()
batch = tuple(t.to(self.device) for t in batch)
input_ids, input_mask, segment_ids, label_ids, seq_lengths = batch
att_loss = 0.
rep_loss = 0.
cls_loss = 0.
rep_loss_layerwise = []
att_loss_layerwise = []
student_logits, student_atts, student_reps = self.student_model(input_ids, segment_ids, input_mask)
if self.args.distill_logit or self.args.distill_rep or self.args.distill_attn:
with torch.no_grad():
teacher_logits, teacher_atts, teacher_reps = self.teacher_model(input_ids, segment_ids, input_mask)
loss = 0.
if self.args.distill_logit:
cls_loss = soft_cross_entropy(student_logits / self.args.temperature,
teacher_logits / self.args.temperature)
loss += cls_loss
tr_cls_loss += cls_loss.item()
if self.args.distill_rep or self.args.distill_attn:
for student_att, teacher_att in zip(student_atts, teacher_atts):
student_att = torch.where(student_att <= -1e2, torch.zeros_like(student_att).to(self.device),
student_att)
teacher_att = torch.where(teacher_att <= -1e2, torch.zeros_like(teacher_att).to(self.device),
teacher_att)
tmp_loss = loss_mse(student_att, teacher_att)
att_loss += tmp_loss
att_loss_layerwise.append(tmp_loss.item())
for student_rep, teacher_rep in zip(student_reps, teacher_reps):
tmp_loss = loss_mse(student_rep, teacher_rep)
rep_loss += tmp_loss
rep_loss_layerwise.append(tmp_loss.item())
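# Note: rep_loss_layerwise[0] comes from the embedding output; the remaining
# entries follow the transformer layers in order, matching the logging block
# further below.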
tr_att_loss += att_loss.item()
tr_rep_loss += rep_loss.item()
if self.args.distill_rep:
loss += rep_loss
if self.args.distill_attn:
loss += att_loss
else:
if output_mode == "classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(student_logits, label_ids.view(-1))
elif output_mode == "regression":
loss_mse = MSELoss()
loss = loss_mse(student_logits.view(-1), label_ids.view(-1))
if self.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if self.args.gradient_accumulation_steps > 1:
loss = loss / self.args.gradient_accumulation_steps
loss.backward()
tr_loss += loss.item()
global_tr_loss += loss.item()
nb_tr_examples += label_ids.size(0)
nb_tr_steps += 1
# evaluation and save model
if global_step % self.args.eval_step == 0 or \
global_step == len(train_dataloader)-1:
logging.info(" Epoch = {} iter {} step".format(epoch_, global_step))
logging.info(" Num examples = %d", len(eval_examples))
logging.info(f" Previous best = {best_dev_acc}")
loss = tr_loss / (step + 1)
global_avg_loss = global_tr_loss / (global_step + 1)
cls_loss = tr_cls_loss / (step + 1)
att_loss = tr_att_loss / (step + 1)
rep_loss = tr_rep_loss / (step + 1)
self.student_model.eval()
result = self._do_eval(self.student_model, task_name, eval_dataloader, output_mode, eval_labels, num_labels)
result['global_step'] = global_step
result['cls_loss'] = cls_loss
result['att_loss'] = att_loss
result['rep_loss'] = rep_loss
result['loss'] = loss
result['global_loss'] = global_avg_loss
preds = student_logits.detach().cpu().numpy()
train_label = label_ids.cpu().numpy()
if output_mode == "classification":
preds = np.argmax(preds, axis=1)
elif output_mode == "regression":
preds = np.squeeze(preds)
result['train_batch_acc'] = list(compute_metrics(task_name, preds, train_label).values())[0]
if self.args.distill_rep or self.args.distill_attn:
logging.info("embedding layer rep_loss: %.8f" % (rep_loss_layerwise[0]))
rep_loss_layerwise = rep_loss_layerwise[1:]
for lid in range(len(rep_loss_layerwise)):
logging.info("layer %d rep_loss: %.8f" % (lid+1, rep_loss_layerwise[lid]))
logging.info("layer %d att_loss: %.8f" % (lid+1, att_loss_layerwise[lid]))
result_to_file(result, output_eval_file)
save_model = False
if task_name in acc_tasks and result['acc'] > best_dev_acc:
best_dev_acc = result['acc']
save_model = True
if task_name in corr_tasks and result['corr'] > best_dev_acc:
best_dev_acc = result['corr']
save_model = True
if task_name in mcc_tasks and result['mcc'] > best_dev_acc:
best_dev_acc = result['mcc']
save_model = True
if save_model:
self._save()
if task_name == "mnli":
logging.info('MNLI-mm Evaluation')
result = self._do_eval(self.student_model, 'mnli-mm', mm_eval_dataloader, output_mode, mm_eval_labels, num_labels)
result['global_step'] = global_step
if not os.path.exists(self.output_dir + '-MM'):
os.makedirs(self.output_dir + '-MM')
tmp_output_eval_file = os.path.join(self.output_dir + '-MM', "eval_results.txt")
result_to_file(result, tmp_output_eval_file)
# if self.args.quantize_weight:
# self.quanter.restore()
if (step + 1) % self.args.gradient_accumulation_steps == 0:
self.optimizer.step()
self.optimizer.zero_grad()
global_step += 1
def _save(self):
logging.info("******************** Save model ********************")
model_to_save = self.student_model.module if hasattr(self.student_model, 'module') else self.student_model
output_model_file = os.path.join(self.output_dir, WEIGHTS_NAME)
output_config_file = os.path.join(self.output_dir, CONFIG_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
def _check_params(self):
if not self.args.do_eval:
assert self.teacher_model, 'teacher model must not be None in train mode.'
def _check_quantized_modules(self):
logging.info("Checking module types.")
for k, m in self.student_model.named_modules():
if isinstance(m, torch.nn.Linear):
logging.info('%s: %s' % (k, str(m)))
def _setup_grad_scale_stats(self):
self.grad_scale_stats = {'weight': None, \
'bias': None, \
'layer_norm': None, \
'step_size/clip_val': None}
self.ema_grad = 0.9
def check_grad_scale(self):
logging.info("Check grad scale ratio: grad/w")
for k, v in self.student_model.named_parameters():
if v.grad is not None:
has_grad = True
ratio = v.grad.norm(p=2) / v.data.norm(p=2)
# print('%.6e, %s' % (ratio.float(), k))
else:
has_grad = False
logging.info('params: %s has no gradient' % k)
continue
# update grad_scale stats
if 'weight' in k and v.ndimension() == 2:
key = 'weight'
elif 'bias' in k and v.ndimension() == 1:
key = 'bias'
elif 'LayerNorm' in k and 'weight' in k and v.ndimension() == 1:
key = 'layer_norm'
elif 'clip_' in k:
key = 'step_size/clip_val'
else:
key = None
if key and has_grad:
if self.grad_scale_stats[key]:
self.grad_scale_stats[key] = self.ema_grad * self.grad_scale_stats[key] + (1-self.ema_grad) * ratio
else:
self.grad_scale_stats[key] = ratio
for (key, val) in self.grad_scale_stats.items():
if val is not None:
logging.info('%.6e, %s' % (val, key))
|
bit-main
|
kd_learner_glue.py
|
# coding=utf-8
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Copyright 2021 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import string
import random
import torch
def generate_job_id():
return ''.join(random.sample(string.ascii_letters+string.digits, 5))
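# Illustrative output: a 5-character alphanumeric id such as 'aB3kZ' (characters
# drawn without replacement from ASCII letters and digits).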
def init_logging(log_path):
if not os.path.isdir(os.path.dirname(log_path)):
print("Log path does not exist. Creating a new one.")
os.makedirs(os.path.dirname(log_path))
if os.path.exists(log_path):
print("%s already exists. Replacing it with the current experiment." % log_path)
os.remove(log_path)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logFormatter = logging.Formatter('%(asctime)s [%(levelname)s]: %(message)s')
fileHandler = logging.FileHandler(log_path)
fileHandler.setFormatter(logFormatter)
logger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
logger.addHandler(consoleHandler)
def print_args(args):
for k, v in zip(args.keys(), args.values()):
logging.info("{0}: {1}".format(k, v))
def soft_cross_entropy(predicts, targets):
student_likelihood = torch.nn.functional.log_softmax(predicts, dim=-1)
targets_prob = torch.nn.functional.softmax(targets, dim=-1)
return (- targets_prob * student_likelihood).mean()
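# Illustrative sanity check: for identical logits predicts == targets ==
# torch.tensor([[0.0, 0.0]]), both class probabilities are 0.5, each element
# contributes -0.5 * log(0.5) ≈ 0.347, and .mean() returns ≈ 0.347.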
def visualize_clip(clip_dict):
# assert len(clip_dict) > 0, 'empty clip_dict, possibly not learnable_scaling.'
logging.info("Visualizing learnable clipping vals...")
for n, p in clip_dict.items():
if p.nelement() == 2:
# PACT clip val has two elements
logging.info("PACT clip_val: %s: (%.4f, %.4f)" % (n, p[0].item(), p[1].item()))
elif p.nelement() == 1:
# Alpha has only one element
logging.info("Alpha: %s: %.4f" % (n, p.item()))
def result_to_file(result, file_name):
with open(file_name, "a") as writer:
logging.info("***** Eval results *****")
for key in sorted(result.keys()):
if result[key] > 0.0:
logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
|
bit-main
|
helper.py
|
# coding=utf-8
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# 2022.09.25 - Add support for using quantized Bert model as teacher
# Meta Platforms, Inc. <[email protected]>
#
# Copyright 2021 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import argparse
import os
import random
import numpy as np
import torch
import copy
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.nn import MSELoss
from transformer.configuration_bert import BertConfig
from transformer.tokenization import BertTokenizer
from transformer.optimization import BertAdam, WarmupLinearSchedule
from transformer.file_utils import WEIGHTS_NAME, CONFIG_NAME
from transformer.modeling_bert import BertForQuestionAnswering
from transformer.modeling_bert_quant import BertForQuestionAnswering as QuantBertForQuestionAnswering
from helper import *
import pickle
from kd_learner_squad import KDLearner
from utils_squad import write_predictions, read_squad_examples, InputFeatures, convert_examples_to_features
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--job_id", default='tmp', type=str, help='jobid to save training logs')
parser.add_argument("--data_dir", default=None, type=str,help="The root dir of glue data")
parser.add_argument("--teacher_model", default='', type=str, help="The teacher model dir.")
parser.add_argument("--student_model", default='', type=str, help="The student model dir.")
parser.add_argument("--vocab_dir", default='', type=str, help="The vocab.txt dir.")
parser.add_argument("--task_name", default=None, type=str, help="The name of the glue task to train.")
parser.add_argument("--output_dir", default='output', type=str,help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--max_seq_length", default=None, type=int, help="The maximum total input sequence length after
WordPiece tokenization. Sequences longer than this will be truncated, and sequences shorter than this will be padded.")
parser.add_argument("--batch_size", default=None, type=int, help="Total batch size for training.")
parser.add_argument("--learning_rate", default=None, type=float, help="The initial learning rate for Adam.")
parser.add_argument('--weight_decay', '--wd', default=0.01, type=float, metavar='W', help='weight decay')
parser.add_argument("--num_train_epochs", default=None, type=int, help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion", default=0.1, type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available")
parser.add_argument('--seed', type=int, default=42, help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--do_eval", action='store_true')
parser.add_argument('--eval_step', type=int, default=100)
# distillation params
parser.add_argument('--aug_train', action='store_true',
help="Whether to use augmented training data")
parser.add_argument('--distill_logit', action='store_true',
help="Whether to distill the teacher logits")
parser.add_argument('--distill_rep', action='store_true',
help="Whether to distill the hidden representations")
parser.add_argument('--distill_attn', action='store_true',
help="Whether to distill the attention maps")
parser.add_argument('--temperature', type=float, default=1.)
# quantization params
parser.add_argument("--weight_bits", default=1, type=int, help="number of bits for weight")
parser.add_argument("--weight_quant_method", default='bwn', type=str,
choices=['bwn', 'uniform'],
help="weight quantization methods")
parser.add_argument("--input_bits", default=1, type=int,
help="number of bits for activation")
parser.add_argument("--input_quant_method", default='uniform', type=str,
help="weight quantization methods")
parser.add_argument('--not_quantize_attention', action='store_true', help="Keep attention calculations in 32-bit.")
parser.add_argument('--learnable_scaling', action='store_true', default=True)
parser.add_argument("--ACT2FN", default='relu', type=str, help='use relu for positive outputs.')
# training config
parser.add_argument('--sym_quant_ffn_attn', action='store_true',
help='whether to use symmetric quantization for the attn score and ffn activations') # default asym
parser.add_argument('--sym_quant_qkvo', action='store_true', default=True,
help='whether to use symmetric quantization for Q/K/V and other linear layers') # default sym
parser.add_argument('--clip_init_file', default='threshold_std.pkl', help='files to restore init clip values.')
parser.add_argument('--clip_init_val', default=2.5, type=float, help='init value of clip_vals, default to (-2.5, +2.5).')
parser.add_argument('--clip_lr', default=1e-4, type=float, help='Use a separate lr for clip_vals / stepsize')
parser.add_argument('--clip_wd', default=0.0, type=float, help='weight decay for clip_vals / stepsize')
# layerwise quantization config
parser.add_argument('--embed_layerwise', default=False, type=lambda x: bool(int(x)))
parser.add_argument('--weight_layerwise', default=True, type=lambda x: bool(int(x)))
parser.add_argument('--input_layerwise', default=True, type=lambda x: bool(int(x)))
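# These three flags are parsed as int and coerced to bool, so a hypothetical
# invocation passes them as 0/1, e.g.:
#   --embed_layerwise 0 --weight_layerwise 1 --input_layerwise 1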
parser.add_argument('--version_2_with_negative',
action='store_true')
parser.add_argument("--doc_stride", default=128, type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.")
parser.add_argument("--max_query_length", default=64, type=int,
help="The maximum number of tokens for the question. Questions longer than this will "
"be truncated to this length.")
parser.add_argument("--n_best_size", default=20, type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json "
"output file.")
parser.add_argument("--max_answer_length", default=30, type=int,
help="The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another.")
parser.add_argument('--null_score_diff_threshold',
type=float, default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.")
args = parser.parse_args()
args.do_lower_case = True
log_dir = os.path.join(args.output_dir, 'record_%s.log' % args.job_id)
init_logging(log_dir)
print_args(vars(args))
# Prepare devices
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
logging.info("device: {} n_gpu: {}".format(device, n_gpu))
# Prepare seed
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
tokenizer = BertTokenizer.from_pretrained(args.teacher_model, do_lower_case=True)
config = BertConfig.from_pretrained(args.teacher_model)
config.num_labels = 2
student_config = copy.deepcopy(config)
student_config.weight_bits = args.weight_bits
student_config.input_bits = args.input_bits
student_config.weight_quant_method = args.weight_quant_method
student_config.input_quant_method = args.input_quant_method
student_config.clip_init_val = args.clip_init_val
student_config.learnable_scaling = args.learnable_scaling
student_config.sym_quant_qkvo = args.sym_quant_qkvo
student_config.sym_quant_ffn_attn = args.sym_quant_ffn_attn
student_config.embed_layerwise = args.embed_layerwise
student_config.weight_layerwise = args.weight_layerwise
student_config.input_layerwise = args.input_layerwise
student_config.hidden_act = args.ACT2FN
student_config.not_quantize_attention = args.not_quantize_attention
logging.info("***** Training data *****")
input_file = 'train-v2.0.json' if args.version_2_with_negative else 'train-v1.1.json'
input_file = os.path.join(args.data_dir, input_file)
if os.path.exists(input_file+'.features.pkl'):
logging.info(" loading from cache %s", input_file+'.features.pkl')
train_features = pickle.load(open(input_file+'.features.pkl', 'rb'))
else:
_, train_examples = read_squad_examples(input_file=input_file, is_training=True,
version_2_with_negative=args.version_2_with_negative)
train_features = convert_examples_to_features(
examples=train_examples,
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride,
max_query_length=args.max_query_length,
is_training=True)
pickle.dump(train_features, open(input_file+'.features.pkl','wb'))
args.batch_size = args.batch_size // args.gradient_accumulation_steps
num_train_optimization_steps = int(
len(train_features) / args.batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
logging.info(" Num examples = %d", len(train_features))
logging.info(" Num total steps = %d", num_train_optimization_steps)
all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
all_start_positions = torch.tensor([f.start_position for f in train_features], dtype=torch.long)
all_end_positions = torch.tensor([f.end_position for f in train_features], dtype=torch.long)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
all_start_positions, all_end_positions)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)
logging.info("***** Evaluation data *****")
input_file = 'dev-v2.0.json' if args.version_2_with_negative else 'dev-v1.1.json'
args.dev_file = os.path.join(args.data_dir, input_file)
dev_dataset, eval_examples = read_squad_examples(
input_file=args.dev_file, is_training=False,
version_2_with_negative=args.version_2_with_negative)
eval_features = convert_examples_to_features(
examples=eval_examples,
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride,
max_query_length=args.max_query_length,
is_training=False)
logging.info(" Num examples = %d", len(eval_features))
all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index)
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.batch_size)
if not args.do_eval:
if hasattr(config, "input_bits") and config.input_bits < 32:
teacher_model = QuantBertForQuestionAnswering.from_pretrained(args.teacher_model, config=config)
else:
teacher_model = BertForQuestionAnswering.from_pretrained(args.teacher_model, config=config)
teacher_model.to(device)
if n_gpu > 1:
teacher_model = torch.nn.DataParallel(teacher_model)
student_model = QuantBertForQuestionAnswering.from_pretrained(args.student_model, config=student_config)
student_model.to(device)
if n_gpu > 1:
student_model = torch.nn.DataParallel(student_model)
learner = KDLearner(args, device, student_model, teacher_model, num_train_optimization_steps)
if args.do_eval:
""" evaluation """
learner.eval(student_model, eval_dataloader, eval_features, eval_examples, dev_dataset)
return 0
learner.args.distill_logit = True
learner.args.distill_rep = True
learner.args.distill_attn = False
learner.build(lr=args.learning_rate)
learner.train(train_dataloader, eval_dataloader, eval_features, eval_examples, dev_dataset)
del learner
return 0
if __name__ == "__main__":
main()
|
bit-main
|
quant_task_distill_squad.py
|
# coding=utf-8
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# 2022.09.25 - Add default learning rate and batch size
# Meta Platforms, Inc. <[email protected]>
#
# Copyright 2021 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import sys
import csv
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import matthews_corrcoef, f1_score
import torch
from torch.utils.data import TensorDataset
logger = logging.getLogger()
acc_tasks = ["mnli", "mrpc", "sst-2", "qqp", "qnli", "rte"]
corr_tasks = ["sts-b"]
mcc_tasks = ["cola"]
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id, seq_length=None):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.seq_length = seq_length
self.label_id = label_id
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the test set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding="utf-8") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
if sys.version_info[0] == 2:
line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
return lines
class MrpcProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_aug_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train_aug.tsv")), "aug")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[3]
text_b = line[4]
if set_type == 'test':
label = None
else:
label = line[0]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
"dev_matched")
def get_test_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test")
def get_aug_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train_aug.tsv")), "aug")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[8]
text_b = line[9]
if set_type == 'test':
label = None
else:
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MnliMismatchedProcessor(MnliProcessor):
"""Processor for the MultiNLI Mismatched data set (GLUE version)."""
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv")),
"dev_matched")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test_mismatched.tsv")),
"test")
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_aug_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train_aug.tsv")), "aug")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
if set_type == 'test':
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[1]
label = None
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
else:
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = line[3]
label = line[1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class Sst2Processor(DataProcessor):
"""Processor for the SST-2 data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_aug_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train_aug.tsv")), "aug")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
if set_type == 'test':
text_a = line[1]
label = None
else:
text_a = line[0]
label = line[1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class StsbProcessor(DataProcessor):
"""Processor for the STS-B data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_aug_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train_aug.tsv")), "aug")
def get_labels(self):
"""See base class."""
return [None]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[7]
text_b = line[8]
if set_type == 'test':
label = None
else:
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class QqpProcessor(DataProcessor):
"""Processor for the STS-B data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_aug_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train_aug.tsv")), "aug")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
try:
if set_type == 'test':
text_a = line[1]
text_b = line[2]
label = None
else:
text_a = line[3]
text_b = line[4]
label = line[5]
except IndexError:
continue
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class QnliProcessor(DataProcessor):
"""Processor for the STS-B data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")),
"dev_matched")
def get_test_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_aug_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train_aug.tsv")), "aug")
def get_labels(self):
"""See base class."""
return ["entailment", "not_entailment"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
if set_type == 'test':
text_a = line[1]
text_b = line[2]
label = None
else:
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class RteProcessor(DataProcessor):
"""Processor for the RTE data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_aug_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train_aug.tsv")), "aug")
def get_labels(self):
"""See base class."""
return ["entailment", "not_entailment"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
if set_type == 'test':
text_a = line[1]
text_b = line[2]
label = None
else:
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class WnliProcessor(DataProcessor):
"""Processor for the WNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer, output_mode):
"""Loads a data file into a list of `InputBatch`s."""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(examples)))
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
seq_length = len(input_ids)
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
        if example.label is None:
            # test-set examples carry no gold label; fall back to a dummy id
            label_id = 0
        elif output_mode == "classification":
            label_id = label_map[example.label]
        elif output_mode == "regression":
            label_id = float(example.label)
        else:
            raise KeyError(output_mode)
if ex_index < 1:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logger.info("label: {}".format(example.label))
logger.info("label_id: {}".format(label_id))
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
seq_length=seq_length))
return features
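# Feature layout produced above, sketched for a hypothetical pair with
# max_seq_length=8 (tokens_a=["a", "b"], tokens_b=["x", "y"]):
#   tokens:      [CLS]  a  b  [SEP]  x  y  [SEP]  <pad>
#   segment_ids:   0    0  0    0    1  1    1      0
#   input_mask:    1    1  1    1    1  1    1      0
# input_ids, input_mask and segment_ids are all padded out to max_seq_length.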
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
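# Worked example of the truncation heuristic above: with max_length=4,
# _truncate_seq_pair(["a", "b", "c", "d"], ["x", "y"], 4) pops one token at a
# time from whichever list is currently longer, ending with
# (["a", "b"], ["x", "y"]).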
def simple_accuracy(preds, labels):
return (preds == labels).mean()
def acc_and_f1(preds, labels):
acc = simple_accuracy(preds, labels)
f1 = f1_score(y_true=labels, y_pred=preds)
return {
"acc": acc,
"f1": f1,
"acc_and_f1": (acc + f1) / 2,
}
def pearson_and_spearman(preds, labels):
pearson_corr = pearsonr(preds, labels)[0]
spearman_corr = spearmanr(preds, labels)[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
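# Quick sanity checks for the metrics above (values worked out by hand):
#   simple_accuracy(np.array([0, 1, 1]), np.array([0, 1, 0]))  -> 2/3
#   pearson_and_spearman([1., 2., 3.], [1., 2., 3.])           -> both corrs 1.0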
def compute_metrics(task_name, preds, labels):
assert len(preds) == len(labels)
if task_name == "cola":
return {"mcc": matthews_corrcoef(labels, preds)}
elif task_name == "sst-2":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "mrpc":
return acc_and_f1(preds, labels)
elif task_name == "sts-b":
return pearson_and_spearman(preds, labels)
elif task_name == "qqp":
return acc_and_f1(preds, labels)
elif task_name == "mnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "mnli-mm":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "qnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "rte":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "wnli":
return {"acc": simple_accuracy(preds, labels)}
else:
raise KeyError(task_name)
def get_tensor_data(output_mode, features):
    if output_mode == "classification":
        all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
    elif output_mode == "regression":
        all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.float)
    else:
        raise KeyError(output_mode)
all_seq_lengths = torch.tensor([f.seq_length for f in features], dtype=torch.long)
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
tensor_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
all_label_ids, all_seq_lengths)
return tensor_data, all_label_ids
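# Illustrative sketch (hypothetical helper, not part of the original file):
# typical consumption of get_tensor_data; `features` is assumed to come from
# convert_examples_to_features above.
def _demo_dataloader(features, output_mode, batch_size=32):
    from torch.utils.data import DataLoader, RandomSampler
    data, label_ids = get_tensor_data(output_mode, features)
    loader = DataLoader(data, sampler=RandomSampler(data), batch_size=batch_size)
    # Each batch unpacks in the tensor order built above:
    #   input_ids, input_mask, segment_ids, label_ids, seq_lengths = batch
    return loader, label_ids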
processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mnli-mm": MnliMismatchedProcessor,
"mrpc": MrpcProcessor,
"sst-2": Sst2Processor,
"sts-b": StsbProcessor,
"qqp": QqpProcessor,
"qnli": QnliProcessor,
"rte": RteProcessor,
"wnli": WnliProcessor
}
output_modes = {
"cola": "classification",
"mnli": "classification",
"mrpc": "classification",
"sst-2": "classification",
"sts-b": "regression",
"qqp": "classification",
"qnli": "classification",
"rte": "classification",
"wnli": "classification"
}
# intermediate distillation default parameters
default_params = {
"cola": {"num_train_epochs": 50, "max_seq_length": 64, "batch_size": 16, "learning_rate": 5e-4 },
"mnli": {"num_train_epochs": 6, "max_seq_length": 128, "batch_size": 16, "learning_rate": 2e-4 },
"mrpc": {"num_train_epochs": 20, "max_seq_length": 128, "batch_size": 8, "learning_rate": 5e-4 },
"sst-2": {"num_train_epochs": 10, "max_seq_length": 64, "batch_size": 8, "learning_rate": 1e-4 },
"sts-b": {"num_train_epochs": 20, "max_seq_length": 128, "batch_size": 8, "learning_rate": 5e-4 },
"qqp": {"num_train_epochs": 6, "max_seq_length": 128, "batch_size": 32, "learning_rate": 2e-4 },
"qnli": {"num_train_epochs": 10, "max_seq_length": 128, "batch_size": 8, "learning_rate": 2e-4 },
"rte": {"num_train_epochs": 20, "max_seq_length": 128, "batch_size": 8, "learning_rate": 5e-4 }
}
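# Lookup sketch mirroring how the driver scripts backfill unset CLI arguments:
#   task = "rte"
#   if args.learning_rate is None and task in default_params:
#       args.learning_rate = default_params[task]["learning_rate"]   # 5e-4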
|
bit-main
|
utils_glue.py
|
# coding=utf-8
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# 2022.09.25 - Add distill_attn argument for removing attention distillation
# Meta Platforms, Inc. <[email protected]>
#
# Copyright 2021 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import logging
from transformer.file_utils import WEIGHTS_NAME, CONFIG_NAME, MISC_NAME
from tqdm import tqdm, trange
from torch.nn import CrossEntropyLoss, MSELoss
from transformer.optimization import BertAdam
from helper import *
from utils_squad import *
import numpy as np
import pickle
logging.basicConfig(level=logging.INFO)
class KDLearner(object):
def __init__(self, args, device, student_model, teacher_model=None, num_train_optimization_steps = None):
self.args = args
self.device = device
self.n_gpu = torch.cuda.device_count()
self.student_model = student_model
self.teacher_model = teacher_model
self.num_train_optimization_steps = num_train_optimization_steps
self._check_params()
self.name = 'kd_' # learner suffix for saving
def build(self, lr=None):
self.prev_global_step = 0
if (self.args.distill_rep or self.args.distill_attn) and not self.args.distill_logit:
self.stage = 'kd_stage1'
elif self.args.distill_logit and not (self.args.distill_rep or self.args.distill_attn):
self.stage = 'kd_stage2'
elif self.args.distill_logit and (self.args.distill_rep or self.args.distill_attn):
self.stage = 'kd_joint'
else:
self.stage = 'nokd'
self.output_dir = os.path.join(self.args.output_dir, self.stage)
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
param_optimizer = list(self.student_model.named_parameters())
self.clip_params = {}
for k, v in param_optimizer:
if 'clip_' in k:
self.clip_params[k] = v
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if (not any(nd in n for nd in no_decay) and not 'clip_' in n)], 'weight_decay': self.args.weight_decay},
{'params': [p for n, p in param_optimizer if (any(nd in n for nd in no_decay) and not 'clip_' in n)], 'weight_decay': 0.0},
{'params': [p for n, p in self.clip_params.items()], 'lr': self.args.clip_lr, 'weight_decay': self.args.clip_wd},
]
schedule = 'warmup_linear'
learning_rate = self.args.learning_rate if not lr else lr
self.optimizer = BertAdam(optimizer_grouped_parameters,
schedule=schedule,
lr=learning_rate,
warmup=self.args.warmup_proportion,
t_total=self.num_train_optimization_steps)
logging.info("Optimizer prepared.")
self._check_quantized_modules()
self._setup_grad_scale_stats()
    def eval(self, model, dataloader, features, examples, dataset):
all_results = []
for _,batch_ in tqdm(enumerate(dataloader)):
batch_ = tuple(t.to(self.device) for t in batch_)
input_ids, input_mask, segment_ids, example_indices = batch_
with torch.no_grad():
(batch_start_logits, batch_end_logits),_,_ = model(input_ids, segment_ids, input_mask)
for i, example_index in enumerate(example_indices):
start_logits = batch_start_logits[i].detach().cpu().tolist()
end_logits = batch_end_logits[i].detach().cpu().tolist()
eval_feature = features[example_index.item()]
unique_id = int(eval_feature.unique_id)
all_results.append(RawResult(unique_id=unique_id,
start_logits=start_logits,
end_logits=end_logits))
return write_predictions(examples, features, all_results,
self.args.n_best_size, self.args.max_answer_length,
True, False,
self.args.version_2_with_negative, self.args.null_score_diff_threshold,dataset)
def train(self, train_dataloader, eval_dataloader, eval_features, eval_examples, dev_dataset):
""" quant-aware pretraining + KD """
# Prepare loss functions
loss_mse = MSELoss()
self.teacher_model.eval()
teacher_results = self.eval(self.teacher_model, eval_dataloader,eval_features,eval_examples, dev_dataset)
logging.info("Teacher network evaluation")
for key in sorted(teacher_results.keys()):
logging.info(" %s = %s", key, str(teacher_results[key]))
# self.teacher_model.train() # switch to train mode to supervise students
# Train and evaluate
# num_layers = self.student_model.config.num_hidden_layers + 1
global_step = 0
best_dev_f1 = 0.0
output_eval_file = os.path.join(self.output_dir, "eval_results.txt")
logging.info(" Distill rep: %d, Distill attn: %d, Distill logit: %d" % (self.args.distill_rep, self.args.distill_attn, self.args.distill_logit))
logging.info(" Batch size = %d", self.args.batch_size)
logging.info(" Num steps = %d", self.num_train_optimization_steps)
global_tr_loss = 0 # record global average training loss to plot
for epoch_ in range(int(self.args.num_train_epochs)):
tr_loss = 0.
tr_att_loss = 0.
tr_rep_loss = 0.
tr_cls_loss = 0.
for step, batch in enumerate(train_dataloader):
self.student_model.train()
batch = tuple(t.to(self.device) for t in batch)
input_ids, input_mask, segment_ids, start_positions, end_positions = batch
att_loss = 0.
rep_loss = 0.
cls_loss = 0.
rep_loss_layerwise = []
att_loss_layerwise = []
loss = 0.
if self.args.distill_logit or (self.args.distill_rep or self.args.distill_attn):
# use distillation
student_logits, student_atts, student_reps = self.student_model(input_ids, segment_ids, input_mask)
with torch.no_grad():
teacher_logits, teacher_atts, teacher_reps = self.teacher_model(input_ids, segment_ids, input_mask)
# NOTE: config loss according to stage
if self.args.distill_logit:
soft_start_ce_loss = soft_cross_entropy(student_logits[0], teacher_logits[0])
soft_end_ce_loss = soft_cross_entropy(student_logits[1], teacher_logits[1])
                        cls_loss = soft_start_ce_loss + soft_end_ce_loss
                        loss += cls_loss
tr_cls_loss += cls_loss.item()
if (self.args.distill_rep or self.args.distill_attn):
for student_att, teacher_att in zip(student_atts, teacher_atts):
student_att = torch.where(student_att <= -1e2, torch.zeros_like(student_att).to(self.device),
student_att)
teacher_att = torch.where(teacher_att <= -1e2, torch.zeros_like(teacher_att).to(self.device),
teacher_att)
tmp_loss = loss_mse(student_att, teacher_att)
att_loss += tmp_loss
att_loss_layerwise.append(tmp_loss.item())
for student_rep, teacher_rep in zip(student_reps, teacher_reps):
tmp_loss = loss_mse(student_rep, teacher_rep)
rep_loss += tmp_loss
rep_loss_layerwise.append(tmp_loss.item())
# rep_loss_layerwise = rep_loss_layerwise[1:] # remove embed dist
tr_att_loss += att_loss.item()
tr_rep_loss += rep_loss.item()
if self.args.distill_rep:
loss += rep_loss
if self.args.distill_attn:
loss += att_loss
else:
cls_loss, _, _ = self.student_model(input_ids, segment_ids, input_mask,start_positions, end_positions)
                    loss += cls_loss
tr_cls_loss += cls_loss.item()
if self.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if self.args.gradient_accumulation_steps > 1:
loss = loss / self.args.gradient_accumulation_steps
loss.backward()
tr_loss += loss.item()
global_tr_loss += loss.item()
# evaluation and save model
if global_step % self.args.eval_step == 0 or \
global_step == len(train_dataloader)-1:
logging.info("***** KDLearner %s Running evaluation, Job_id: %s *****" % (self.stage, self.args.job_id))
logging.info(" Epoch = {} iter {} step".format(epoch_, global_step))
logging.info(f" Previous best = {best_dev_f1}")
loss = tr_loss / (step + 1)
global_avg_loss = global_tr_loss / (global_step + 1)
cls_loss = tr_cls_loss / (step + 1)
att_loss = tr_att_loss / (step + 1)
rep_loss = tr_rep_loss / (step + 1)
self.student_model.eval()
result = self.eval(self.student_model, eval_dataloader,eval_features,eval_examples, dev_dataset)
result['global_step'] = global_step
result['train_cls_loss'] = cls_loss
result['att_loss'] = att_loss
result['rep_loss'] = rep_loss
result['loss'] = loss
result['global_loss'] = global_avg_loss
if (self.args.distill_rep or self.args.distill_attn):
# add the layerwise loss on rep and att
logging.info("embedding layer rep_loss: %.8f" % (rep_loss_layerwise[0]))
rep_loss_layerwise = rep_loss_layerwise[1:]
for lid in range(len(rep_loss_layerwise)):
logging.info("layer %d rep_loss: %.8f" % (lid+1, rep_loss_layerwise[lid]))
logging.info("layer %d att_loss: %.8f" % (lid+1, att_loss_layerwise[lid]))
result_to_file(result, output_eval_file)
save_model = False
if result['f1'] > best_dev_f1:
best_dev_f1 = result['f1']
save_model = True
if save_model:
self._save()
# if self.args.quantize_weight:
# self.quanter.restore()
if (step + 1) % self.args.gradient_accumulation_steps == 0:
self.optimizer.step()
self.optimizer.zero_grad()
global_step += 1
def _save(self):
logging.info("******************** Save model ********************")
model_to_save = self.student_model.module if hasattr(self.student_model, 'module') else self.student_model
output_model_file = os.path.join(self.output_dir, WEIGHTS_NAME)
output_config_file = os.path.join(self.output_dir, CONFIG_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
def _check_params(self):
if not self.args.do_eval:
assert self.teacher_model, 'teacher model must not be None in train mode.'
def _check_quantized_modules(self):
logging.info("Checking module types.")
for k, m in self.student_model.named_modules():
if isinstance(m, torch.nn.Linear):
logging.info('%s: %s' % (k, str(m)))
def _setup_grad_scale_stats(self):
self.grad_scale_stats = {'weight': None, \
'bias': None, \
'layer_norm': None, \
'step_size/clip_val': None}
self.ema_grad = 0.9
def check_grad_scale(self):
logging.info("Check grad scale ratio: grad/w")
for k, v in self.student_model.named_parameters():
if v.grad is not None:
has_grad = True
ratio = v.grad.norm(p=2) / v.data.norm(p=2)
# print('%.6e, %s' % (ratio.float(), k))
else:
has_grad = False
logging.info('params: %s has no gradient' % k)
continue
# update grad_scale stats
if 'weight' in k and v.ndimension() == 2:
key = 'weight'
elif 'bias' in k and v.ndimension() == 1:
key = 'bias'
elif 'LayerNorm' in k and 'weight' in k and v.ndimension() == 1:
key = 'layer_norm'
elif 'clip_' in k:
key = 'step_size/clip_val'
else:
key = None
if key and has_grad:
if self.grad_scale_stats[key]:
self.grad_scale_stats[key] = self.ema_grad * self.grad_scale_stats[key] + (1-self.ema_grad) * ratio
else:
self.grad_scale_stats[key] = ratio
for (key, val) in self.grad_scale_stats.items():
if val is not None:
logging.info('%.6e, %s' % (val, key))
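# Typical KDLearner lifecycle for SQuAD distillation (sketch; `args`, the
# models and the dataloaders are assumed to be prepared by the driver script):
#   learner = KDLearner(args, device, student_model, teacher_model,
#                       num_train_optimization_steps)
#   learner.build()   # selects kd_stage1 / kd_stage2 / kd_joint / nokd
#   learner.train(train_dataloader, eval_dataloader, eval_features,
#                 eval_examples, dev_dataset)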
|
bit-main
|
kd_learner_squad.py
|
# coding=utf-8
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# 2022.09.25 - Add support for using quantized Bert model as teacher
# Meta Platforms, Inc. <[email protected]>
#
# Copyright 2021 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import argparse
import copy
import logging
import os
import random
import torch
from kd_learner_glue import KDLearner
from helper import *
from utils_glue import *
from transformer.tokenization import BertTokenizer
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler,TensorDataset
from transformer.configuration_bert import BertConfig
from transformer.modeling_bert import BertForSequenceClassification
from transformer.modeling_bert_quant import BertForSequenceClassification as QuantBertForSequenceClassification
from transformer.file_utils import WEIGHTS_NAME, CONFIG_NAME
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--job_id", default='tmp', type=str, help='jobid to save training logs')
parser.add_argument("--data_dir", default=None, type=str,help="The root dir of glue data")
parser.add_argument("--teacher_model", default='', type=str, help="The teacher model dir.")
parser.add_argument("--student_model", default='', type=str, help="The student model dir.")
parser.add_argument("--vocab_dir", default='', type=str, help="The vocab.txt dir.")
parser.add_argument("--task_name", default=None, type=str, help="The name of the glue task to train.")
parser.add_argument("--output_dir", default='output', type=str,help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--max_seq_length", default=None, type=int, help="The maximum total input sequence length after WordPiece tokenization. Sequences longer than this will be truncated, and sequences shorter than this will be padded.")
parser.add_argument("--batch_size", default=None, type=int, help="Total batch size for training.")
parser.add_argument("--learning_rate", default=None, type=float, help="The initial learning rate for Adam.")
parser.add_argument('--weight_decay', '--wd', default=0.01, type=float, metavar='W', help='weight decay')
parser.add_argument("--num_train_epochs", default=None, type=int, help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion", default=0.1, type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available")
parser.add_argument('--seed', type=int, default=42, help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--do_eval", action='store_true')
parser.add_argument('--eval_step', type=int, default=100)
# distillation params
parser.add_argument('--aug_train', action='store_true',
help="Whether using data augmentation or not")
parser.add_argument('--distill_logit', action='store_true',
help="Whether using distillation logits or not")
parser.add_argument('--distill_rep', action='store_true',
help="Whether using distillation reps or not")
parser.add_argument('--distill_attn', action='store_true',
help="Whether using distillation attns or not")
parser.add_argument('--temperature', type=float, default=1.)
# quantization params
parser.add_argument("--weight_bits", default=1, type=int, help="number of bits for weight")
parser.add_argument("--weight_quant_method", default='bwn', type=str,
choices=['bwn', 'uniform'],
help="weight quantization methods")
parser.add_argument("--input_bits", default=1, type=int,
help="number of bits for activation")
parser.add_argument("--input_quant_method", default='uniform', type=str,
help="weight quantization methods")
parser.add_argument('--not_quantize_attention', action='store_true', help="Keep attention calculations in 32-bit.")
parser.add_argument('--learnable_scaling', action='store_true', default=True)
parser.add_argument("--ACT2FN", default='relu', type=str, help='use relu for positive outputs.')
# training config
    parser.add_argument('--sym_quant_ffn_attn', action='store_true',
                        help='whether to use symmetric quantization for attention scores and the FFN activations')  # default asym
    parser.add_argument('--sym_quant_qkvo', action='store_true', default=True,
                        help='whether to use symmetric quantization for Q/K/V and the other projections.')  # default sym
parser.add_argument('--clip_init_file', default='threshold_std.pkl', help='files to restore init clip values.')
parser.add_argument('--clip_init_val', default=2.5, type=float, help='init value of clip_vals, default to (-2.5, +2.5).')
    parser.add_argument('--clip_lr', default=1e-4, type=float, help='Use a separate lr for clip_vals / stepsize')
parser.add_argument('--clip_wd', default=0.0, type=float, help='weight decay for clip_vals / stepsize')
# layerwise quantization config
parser.add_argument('--embed_layerwise', default=False, type=lambda x: bool(int(x)))
parser.add_argument('--weight_layerwise', default=True, type=lambda x: bool(int(x)))
parser.add_argument('--input_layerwise', default=True, type=lambda x: bool(int(x)))
args = parser.parse_args()
args.do_lower_case = True
log_dir = os.path.join(args.output_dir, 'record_%s.log' % args.job_id)
init_logging(log_dir)
# Prepare devices
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
logging.info("device: {} n_gpu: {}".format(device, n_gpu))
# Prepare seed
random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
# Prepare task settings
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
task_name = args.task_name.lower()
# restore the default setting if they are None
if args.learning_rate is None:
if task_name in default_params:
args.learning_rate = default_params[task_name]["learning_rate"]
if args.num_train_epochs is None:
if task_name in default_params:
args.num_train_epochs = default_params[task_name]["num_train_epochs"]
if args.batch_size is None:
if task_name in default_params:
args.batch_size = default_params[task_name]["batch_size"]
#args.batch_size = int(args.batch_size*n_gpu)
    if args.max_seq_length is None:
if task_name in default_params:
args.max_seq_length = default_params[task_name]["max_seq_length"]
if task_name not in processors:
raise ValueError("Task not found: %s" % task_name)
print_args(vars(args))
processor = processors[task_name]()
output_mode = output_modes[task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
tokenizer = BertTokenizer.from_pretrained(args.vocab_dir, do_lower_case=args.do_lower_case)
config = BertConfig.from_pretrained(args.teacher_model)
config.num_labels = num_labels
student_config = copy.deepcopy(config)
student_config.weight_bits = args.weight_bits
student_config.input_bits = args.input_bits
student_config.weight_quant_method = args.weight_quant_method
student_config.input_quant_method = args.input_quant_method
student_config.clip_init_val = args.clip_init_val
student_config.learnable_scaling = args.learnable_scaling
student_config.sym_quant_qkvo = args.sym_quant_qkvo
student_config.sym_quant_ffn_attn = args.sym_quant_ffn_attn
student_config.embed_layerwise = args.embed_layerwise
student_config.weight_layerwise = args.weight_layerwise
student_config.input_layerwise = args.input_layerwise
student_config.hidden_act = args.ACT2FN
student_config.not_quantize_attention = args.not_quantize_attention
num_train_optimization_steps = 0
if not args.do_eval:
if args.aug_train:
train_examples = processor.get_aug_examples(args.data_dir)
else:
train_examples = processor.get_train_examples(args.data_dir)
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
args.batch_size = args.batch_size // args.gradient_accumulation_steps
train_features = convert_examples_to_features(train_examples, label_list,
args.max_seq_length, tokenizer, output_mode)
train_data, _ = get_tensor_data(output_mode, train_features)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)
num_train_optimization_steps = int(
len(train_features) / args.batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
eval_examples = processor.get_dev_examples(args.data_dir)
eval_features = convert_examples_to_features(eval_examples, label_list, args.max_seq_length, tokenizer, output_mode)
eval_data, eval_labels = get_tensor_data(output_mode, eval_features)
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.batch_size)
if task_name == "mnli":
processor = processors["mnli-mm"]()
if not os.path.exists(args.output_dir + '-MM'):
os.makedirs(args.output_dir + '-MM')
mm_eval_examples = processor.get_dev_examples(args.data_dir)
mm_eval_features = convert_examples_to_features(
mm_eval_examples, label_list, args.max_seq_length, tokenizer, output_mode)
mm_eval_data, mm_eval_labels = get_tensor_data(output_mode, mm_eval_features)
logging.info("***** Running mm evaluation *****")
logging.info(" Num examples = %d", len(mm_eval_examples))
mm_eval_sampler = SequentialSampler(mm_eval_data)
mm_eval_dataloader = DataLoader(mm_eval_data, sampler=mm_eval_sampler,
batch_size=args.batch_size)
else:
mm_eval_labels = None
mm_eval_dataloader = None
if not args.do_eval: # need the teacher model for training
if hasattr(config, "input_bits") and config.input_bits < 32:
teacher_model = QuantBertForSequenceClassification.from_pretrained(args.teacher_model, config=config)
else:
teacher_model = BertForSequenceClassification.from_pretrained(args.teacher_model, config=config)
teacher_model.to(device)
if n_gpu > 1:
teacher_model = torch.nn.DataParallel(teacher_model)
else:
teacher_model = None
student_model = QuantBertForSequenceClassification.from_pretrained(args.student_model, config=student_config)
student_model.to(device)
if n_gpu > 1:
student_model = torch.nn.DataParallel(student_model)
    learner = KDLearner(args, device, student_model, teacher_model, num_train_optimization_steps)
learner.args.distill_logit = True
learner.args.distill_rep = True
learner.args.distill_attn = False
learner.build(lr=args.learning_rate)
learner.train(train_examples, task_name, output_mode, eval_labels,
num_labels, train_dataloader, eval_dataloader, eval_examples, tokenizer,
mm_eval_dataloader=mm_eval_dataloader, mm_eval_labels=mm_eval_labels)
del learner
return 0
if __name__ == "__main__":
main()
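# Example invocation (sketch; every path and the job id are placeholders):
#   python quant_task_distill_glue.py --task_name rte --data_dir /path/to/glue \
#       --teacher_model /path/to/teacher --student_model /path/to/student \
#       --vocab_dir /path/to/vocab --job_id demo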
|
bit-main
|
quant_task_distill_glue.py
|
# coding=utf-8
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Configuration base class and utilities."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import copy
import json
import logging
import os
from io import open
from .file_utils import cached_path, CONFIG_NAME
logger = logging.getLogger(__name__)
class PretrainedConfig(object):
r""" Base class for all configuration classes.
Handles a few parameters common to all models' configurations as well as methods for loading/downloading/saving configurations.
Note:
A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to initialize a model does **not** load the model weights.
It only affects the model's configuration.
Class attributes (overridden by derived classes):
        - ``pretrained_config_archive_map``: a python ``dict`` with `short-cut-names` (string) as keys and `url` (string) of associated pretrained model configurations as values.
Parameters:
``finetuning_task``: string, default `None`. Name of the task used to fine-tune the model. This can be used when converting from an original (TensorFlow or PyTorch) checkpoint.
``num_labels``: integer, default `2`. Number of classes to use when the model is a classification model (sequences/tokens)
        ``output_attentions``: boolean, default `False`. Should the model return attention weights.
        ``output_hidden_states``: boolean, default `False`. Should the model return all hidden states.
        ``torchscript``: boolean, default `False`. Is the model used with TorchScript.
"""
pretrained_config_archive_map = {}
def __init__(self, **kwargs):
self.finetuning_task = kwargs.pop('finetuning_task', None)
self.num_labels = kwargs.pop('num_labels', 2)
self.output_attentions = kwargs.pop('output_attentions', False)
self.output_hidden_states = kwargs.pop('output_hidden_states', False)
self.output_past = kwargs.pop('output_past', True) # Not used by all models
self.torchscript = kwargs.pop('torchscript', False) # Only used by PyTorch models
self.use_bfloat16 = kwargs.pop('use_bfloat16', False)
self.pruned_heads = kwargs.pop('pruned_heads', {})
def save_pretrained(self, save_directory):
""" Save a configuration object to the directory `save_directory`, so that it
can be re-loaded using the :func:`~transformers.PretrainedConfig.from_pretrained` class method.
"""
assert os.path.isdir(save_directory), "Saving path should be a directory where the model and configuration can be saved"
# If we save using the predefined names, we can load using `from_pretrained`
output_config_file = os.path.join(save_directory, CONFIG_NAME)
self.to_json_file(output_config_file)
logger.info("Configuration saved in {}".format(output_config_file))
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
r""" Instantiate a :class:`~transformers.PretrainedConfig` (or a derived class) from a pre-trained model configuration.
Parameters:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a pre-trained model configuration to load from cache or download, e.g.: ``bert-base-uncased``.
- a path to a `directory` containing a configuration file saved using the :func:`~transformers.PretrainedConfig.save_pretrained` method, e.g.: ``./my_model_directory/``.
- a path or url to a saved configuration JSON `file`, e.g.: ``./my_model_directory/configuration.json``.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
kwargs: (`optional`) dict: key/value pairs with which to update the configuration object after loading.
- The values in kwargs of any keys which are configuration attributes will be used to override the loaded values.
- Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled by the `return_unused_kwargs` keyword parameter.
force_download: (`optional`) boolean, default False:
                Force to (re-)download the model weights and configuration files and override the cached versions if they exist.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
return_unused_kwargs: (`optional`) bool:
- If False, then this function returns just the final configuration object.
                - If True, then this function returns a tuple `(config, unused_kwargs)` where `unused_kwargs` is a dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e. the part of kwargs which has not been used to update `config` and is otherwise ignored.
Examples::
# We can't instantiate directly the base class `PretrainedConfig` so let's show the examples on a
# derived class: BertConfig
config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache.
config = BertConfig.from_pretrained('./test/saved_model/') # E.g. config (or model) was saved using `save_pretrained('./test/saved_model/')`
config = BertConfig.from_pretrained('./test/saved_model/my_configuration.json')
            config = BertConfig.from_pretrained('bert-base-uncased', output_attentions=True, foo=False)
            assert config.output_attentions == True
            config, unused_kwargs = BertConfig.from_pretrained('bert-base-uncased', output_attentions=True,
                                                               foo=False, return_unused_kwargs=True)
            assert config.output_attentions == True
            assert unused_kwargs == {'foo': False}
"""
config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
logger.info("loading configuration file {}".format(config_file))
# Load config
config = cls.from_json_file(config_file)
if hasattr(config, 'pruned_heads'):
config.pruned_heads = dict((int(key), value) for key, value in config.pruned_heads.items())
# Update config with kwargs if needed
to_remove = []
for key, value in kwargs.items():
if hasattr(config, key):
setattr(config, key, value)
to_remove.append(key)
for key in to_remove:
kwargs.pop(key, None)
logger.info("Model config %s", str(config))
return config
@classmethod
def from_dict(cls, json_object):
"""Constructs a `Config` from a Python dictionary of parameters."""
config = cls(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
setattr(config, key, value)
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path):
""" Save this instance to a json file."""
with open(json_file_path, "w", encoding='utf-8') as writer:
writer.write(self.to_json_string())
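# Illustrative sketch (hypothetical helper, not part of the original file):
# round-tripping a config through JSON. `MyConfig` is a stand-in subclass,
# since this base class is normally used through derived classes such as
# BertConfig.
def _demo_config_roundtrip(tmp_dir):
    class MyConfig(PretrainedConfig):
        pass
    config = MyConfig(num_labels=3)
    config.to_json_file(os.path.join(tmp_dir, CONFIG_NAME))
    restored = MyConfig.from_json_file(os.path.join(tmp_dir, CONFIG_NAME))
    assert config == restored   # __eq__ compares the underlying __dict__
    return restored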
|
bit-main
|
transformer/configuration_utils.py
|
# coding=utf-8
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" BERT model configuration """
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import sys
from io import open
from .configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.json",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-config.json",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-config.json",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-config.json",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-config.json",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-config.json",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-config.json",
'bert-base-german-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-config.json",
'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-config.json",
'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-config.json",
'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-config.json",
'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-config.json",
'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-config.json",
'bert-base-german-dbmdz-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-cased-config.json",
'bert-base-german-dbmdz-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-uncased-config.json",
}
class BertConfig(PretrainedConfig):
r"""
:class:`~transformers.BertConfig` is the configuration class to store the configuration of a
`BertModel`.
Arguments:
vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu", "swish" and "gelu_new" are supported.
        hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
        initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
layer_norm_eps: The epsilon used by LayerNorm.
"""
pretrained_config_archive_map = BERT_PRETRAINED_CONFIG_ARCHIVE_MAP
def __init__(self,
vocab_size_or_config_json_file=30522,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
**kwargs):
super(BertConfig, self).__init__(**kwargs)
if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
and isinstance(vocab_size_or_config_json_file, unicode)):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
else:
raise ValueError("First argument must be either a vocabulary size (int)"
" or the path to a pretrained model config file (str)")
|
bit-main
|
transformer/configuration_bert.py
|
# coding=utf-8
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# 2022.09.25 - Add elastic quantization support
# Meta Platforms, Inc. <[email protected]>
#
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import, division, print_function, unicode_literals
import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.autograd import Variable
from torch.nn.parameter import Parameter
from .file_utils import WEIGHTS_NAME, CONFIG_NAME
from .configuration_bert import BertConfig
from .utils_quant import QuantizeLinear, QuantizeEmbedding, act_quant_fn, AlphaInit
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz",
'bert-base-chinese': "",
}
BERT_CONFIG_NAME = 'bert_config.json'
TF_WEIGHTS_NAME = 'model.ckpt'
class LearnableBias(nn.Module):
def __init__(self, out_chn):
super(LearnableBias, self).__init__()
self.bias = nn.Parameter(torch.zeros(out_chn), requires_grad=True)
def forward(self, x):
out = x + self.bias.expand_as(x)
return out
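# Usage sketch: LearnableBias adds a learned per-channel shift that broadcasts
# over the leading dimensions, e.g.
#   move = LearnableBias(768)
#   y = move(torch.zeros(2, 4, 768))   # y equals the bias expanded to (2, 4, 768)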
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def swish(x):
return x * torch.sigmoid(x)
try:
from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except ImportError:
logger.info(
"Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .")
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu}
NORM = {'layer_norm': BertLayerNorm}
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = QuantizeEmbedding(config.vocab_size, config.hidden_size, padding_idx=0,
clip_val=config.clip_init_val,
weight_bits=config.weight_bits,
weight_quant_method=config.weight_quant_method,
embed_layerwise=config.embed_layerwise,
learnable=config.learnable_scaling,
symmetric=config.sym_quant_qkvo)
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(
config.type_vocab_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None):
seq_length = input_ids.size(1)
position_ids = torch.arange(
seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
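# Shape flow through BertEmbeddings (sketch): input_ids of shape
# (batch, seq_len) -> word/position/token-type lookups, each of shape
# (batch, seq_len, hidden_size) -> summed, LayerNorm'd and dropped out to a
# (batch, seq_len, hidden_size) tensor.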
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(
config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.input_bits = config.input_bits
self.sym_quant_ffn_attn = config.sym_quant_ffn_attn
self.sym_quant_qkvo = config.sym_quant_qkvo
self.input_layerwise = config.input_layerwise
self.input_quant_method = config.input_quant_method
self.quantize_attention_probs = not config.not_quantize_attention
self.query = QuantizeLinear(config.hidden_size, self.all_head_size, clip_val=config.clip_init_val,
weight_bits=config.weight_bits, input_bits=config.input_bits,
weight_layerwise=config.weight_layerwise, input_layerwise=config.input_layerwise,
weight_quant_method=config.weight_quant_method,
input_quant_method=config.input_quant_method,
learnable=config.learnable_scaling, symmetric=config.sym_quant_qkvo)
self.key = QuantizeLinear(config.hidden_size, self.all_head_size, clip_val=config.clip_init_val,
weight_bits=config.weight_bits, input_bits=config.input_bits,
weight_layerwise=config.weight_layerwise, input_layerwise=config.input_layerwise,
weight_quant_method=config.weight_quant_method,
input_quant_method=config.input_quant_method,
learnable=config.learnable_scaling, symmetric=config.sym_quant_qkvo)
self.value = QuantizeLinear(config.hidden_size, self.all_head_size, clip_val=config.clip_init_val,
weight_bits=config.weight_bits, input_bits=config.input_bits,
weight_layerwise=config.weight_layerwise, input_layerwise=config.input_layerwise,
weight_quant_method=config.weight_quant_method,
input_quant_method=config.input_quant_method,
learnable=config.learnable_scaling, symmetric=config.sym_quant_qkvo)
self.move_q = LearnableBias(self.all_head_size)
self.move_k = LearnableBias(self.all_head_size)
self.move_v = LearnableBias(self.all_head_size)
if config.input_quant_method == 'uniform' and config.input_bits < 32:
self.register_buffer('clip_query', torch.Tensor([-config.clip_init_val, config.clip_init_val]))
self.register_buffer('clip_key', torch.Tensor([-config.clip_init_val, config.clip_init_val]))
self.register_buffer('clip_value', torch.Tensor([-config.clip_init_val, config.clip_init_val]))
self.register_buffer('clip_attn', torch.Tensor([-config.clip_init_val, config.clip_init_val]))
if config.learnable_scaling:
self.clip_query = nn.Parameter(self.clip_query)
self.clip_key = nn.Parameter(self.clip_key)
self.clip_value = nn.Parameter(self.clip_value)
self.clip_attn = nn.Parameter(self.clip_attn)
elif (config.input_quant_method == 'elastic' or config.input_quant_method == 'bwn') and config.input_bits < 32:
self.clip_query = AlphaInit(torch.tensor(1.0))
self.clip_key = AlphaInit(torch.tensor(1.0))
self.clip_value = AlphaInit(torch.tensor(1.0))
self.clip_attn = AlphaInit(torch.tensor(1.0))
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[
:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask, output_att=False):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
        if self.input_bits < 32:
            # learnable channel-wise shifts (LearnableBias) are applied before
            # the activations are quantized below
            query_layer = self.move_q(mixed_query_layer)
            key_layer = self.move_k(mixed_key_layer)
            value_layer = self.move_v(mixed_value_layer)
        else:
            query_layer = mixed_query_layer
            key_layer = mixed_key_layer
            value_layer = mixed_value_layer
        query_layer = self.transpose_for_scores(query_layer)
        key_layer = self.transpose_for_scores(key_layer)
        value_layer = self.transpose_for_scores(value_layer)
if self.input_bits < 32:
query_layer = act_quant_fn(query_layer, self.clip_query, self.input_bits, quant_method=self.input_quant_method,
symmetric=self.sym_quant_qkvo, layerwise=self.input_layerwise)
key_layer = act_quant_fn(key_layer, self.clip_key, self.input_bits, quant_method=self.input_quant_method,
symmetric=self.sym_quant_qkvo, layerwise=self.input_layerwise)
value_layer = act_quant_fn(value_layer, self.clip_value, self.input_bits, quant_method=self.input_quant_method,
symmetric=self.sym_quant_qkvo, layerwise=self.input_layerwise)
attention_scores = torch.matmul(
query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / \
math.sqrt(self.attention_head_size)
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
if self.input_bits < 32 and self.quantize_attention_probs:
attention_probs = act_quant_fn(attention_probs, self.clip_attn, self.input_bits, quant_method=self.input_quant_method,
symmetric=self.sym_quant_ffn_attn, layerwise=self.input_layerwise)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[
:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer, attention_scores
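# Shape flow through BertSelfAttention (sketch), for hidden_states of shape
# (batch, seq_len, hidden_size):
#   Q/K/V projections        -> (batch, seq_len, all_head_size)
#   transpose_for_scores     -> (batch, num_heads, seq_len, head_size)
#   QK^T / sqrt(head_size)   -> (batch, num_heads, seq_len, seq_len)
#   context_layer, reshaped  -> (batch, seq_len, all_head_size)
# When input_bits < 32, Q/K/V (and, unless disabled, the attention
# probabilities) pass through act_quant_fn before the matmuls.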
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, attention_mask):
self_output, layer_att = self.self(input_tensor, attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output, layer_att
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = QuantizeLinear(config.hidden_size, config.hidden_size, clip_val=config.clip_init_val,
weight_bits=config.weight_bits, input_bits=config.input_bits,
weight_layerwise=config.weight_layerwise, input_layerwise=config.input_layerwise,
weight_quant_method=config.weight_quant_method,
input_quant_method=config.input_quant_method,
learnable=config.learnable_scaling, symmetric=config.sym_quant_qkvo)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertIntermediate(nn.Module):
def __init__(self, config):
super(BertIntermediate, self).__init__()
self.dense = QuantizeLinear(config.hidden_size, config.intermediate_size, clip_val=config.clip_init_val,
weight_bits=config.weight_bits, input_bits=config.input_bits,
weight_layerwise=config.weight_layerwise, input_layerwise=config.input_layerwise,
weight_quant_method=config.weight_quant_method,
input_quant_method=config.input_quant_method,
learnable=config.learnable_scaling, symmetric=config.sym_quant_qkvo)
if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = QuantizeLinear(config.intermediate_size, config.hidden_size, clip_val=config.clip_init_val,
weight_bits=config.weight_bits, input_bits=config.input_bits,
weight_layerwise=config.weight_layerwise, input_layerwise=config.input_layerwise,
weight_quant_method=config.weight_quant_method,
input_quant_method=config.input_quant_method,
learnable=config.learnable_scaling, symmetric=config.sym_quant_ffn_attn)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config):
super(BertLayer, self).__init__()
self.attention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask):
attention_output, layer_att = self.attention(
hidden_states, attention_mask)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output, layer_att
class BertEncoder(nn.Module):
def __init__(self, config):
super(BertEncoder, self).__init__()
self.layer = nn.ModuleList([BertLayer(config)
for _ in range(config.num_hidden_layers)])
def forward(self, hidden_states, attention_mask):
all_encoder_layers = []
all_encoder_atts = []
for _, layer_module in enumerate(self.layer):
all_encoder_layers.append(hidden_states)
hidden_states, layer_att = layer_module(
hidden_states, attention_mask)
all_encoder_atts.append(layer_att)
all_encoder_layers.append(hidden_states)
return all_encoder_layers, all_encoder_atts
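# Note: all_encoder_layers collects the block input before every layer plus
# the final output, so it holds num_hidden_layers + 1 tensors (the embedding
# output first); all_encoder_atts holds one attention-score tensor per layer.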
class BertPooler(nn.Module):
def __init__(self, config, recurs=None):
super(BertPooler, self).__init__()
self.dense = QuantizeLinear(config.hidden_size, config.hidden_size, clip_val=config.clip_init_val,
weight_bits=config.weight_bits, input_bits=config.input_bits,
weight_layerwise=config.weight_layerwise, input_layerwise=config.input_layerwise,
weight_quant_method=config.weight_quant_method,
input_quant_method=config.input_quant_method,
learnable=config.learnable_scaling, symmetric=config.sym_quant_qkvo)
self.activation = nn.Tanh()
self.config = config
def forward(self, hidden_states):
pooled_output = hidden_states[-1][:, 0]
pooled_output = self.dense(pooled_output)
pooled_output = self.activation(pooled_output)
return pooled_output
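# BertPooler receives the full list of encoder hidden states and pools the
# last layer's [CLS] position: hidden_states[-1][:, 0] has shape
# (batch, hidden_size) before the quantized dense + tanh.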
class BertPreTrainedModel(nn.Module):
""" An abstract class to handle weights initialization and
    a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(BertPreTrainedModel, self).__init__()
if not isinstance(config, BertConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
"To create a model from a Google pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
self.config = config
def init_bert_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(
mean=0.0, std=self.config.initializer_range)
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@classmethod
def from_scratch(cls, pretrained_model_name_or_path, *inputs, **kwargs):
resolved_config_file = os.path.join(
pretrained_model_name_or_path, CONFIG_NAME)
config = BertConfig.from_json_file(resolved_config_file)
logger.info("Model config {}".format(config))
model = cls(config, *inputs, **kwargs)
return model
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
"""
Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name_or_path: either:
- a str with the name of a pre-trained model to load selected in the list of:
. `bert-base-uncased`
. `bert-large-uncased`
. `bert-base-cased`
. `bert-large-cased`
. `bert-base-multilingual-uncased`
. `bert-base-multilingual-cased`
. `bert-base-chinese`
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `model.ckpt` a TensorFlow checkpoint
from_tf: should we load the weights from a locally saved TensorFlow checkpoint
cache_dir: an optional path to a folder in which the pre-trained models will be cached.
state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
*inputs, **kwargs: additional input for the specific Bert class
(ex: num_labels for BertForSequenceClassification)
"""
state_dict = kwargs.get('state_dict', None)
kwargs.pop('state_dict', None)
from_tf = kwargs.get('from_tf', False)
kwargs.pop('from_tf', None)
config = kwargs.get('config', None)
kwargs.pop('config', None)
if config is None:
# Load config
config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
config = BertConfig.from_json_file(config_file)
logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if state_dict is None and not from_tf:
weights_path = os.path.join(
pretrained_model_name_or_path, WEIGHTS_NAME)
logger.info("Loading model {}".format(weights_path))
state_dict = torch.load(weights_path, map_location='cpu')
if from_tf:
# Directly load from a TensorFlow checkpoint
weights_path = os.path.join(
pretrained_model_name_or_path, TF_WEIGHTS_NAME)
return load_tf_weights_in_bert(model, weights_path)
# Load from a PyTorch state_dict
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(
prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
start_prefix = ''
if not hasattr(model, 'bert') and any(s.startswith('bert.') for s in state_dict.keys()):
start_prefix = 'bert.'
logger.info('loading model...')
load(model, prefix=start_prefix)
logger.info('done!')
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(error_msgs) > 0:
raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
model.__class__.__name__, "\n\t".join(error_msgs)))
return model
class BertModel(BertPreTrainedModel):
"""BERT model ("Bidirectional Embedding Representations from a Transformer").
Params:
config: a BertConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.
Outputs: Tuple of (encoded_layers, pooled_output)
`encoded_layers`: controlled by `output_all_encoded_layers` argument:
- `output_all_encoded_layers=True`: output a list of the full sequences of encoded-hidden-states: the
embedding output followed by the output of each attention block (i.e. 13 full sequences for
BERT-base, 25 for BERT-large), each a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
- `output_all_encoded_layers=False`: output only the full sequence of hidden-states corresponding
to the last attention block of shape [batch_size, sequence_length, hidden_size],
`pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
classifier pretrained on top of the hidden state associated with the first token of the
input (`[CLS]`) to train on the Next-Sentence task (see BERT's paper).
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = modeling.BertModel(config=config)
all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertModel, self).__init__(config)
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None,
output_all_encoded_layers=True, output_att=True):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is simpler than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(
dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
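# e.g. attention_mask [[1, 1, 0]] becomes [[[[0.0, 0.0, -10000.0]]]], so each
# masked position receives a large negative bias before the softmax.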
embedding_output = self.embeddings(input_ids, token_type_ids)
encoded_layers, layer_atts = self.encoder(embedding_output,
extended_attention_mask)
pooled_output = self.pooler(encoded_layers)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
if not output_att:
return encoded_layers, pooled_output
return encoded_layers, layer_atts, pooled_output
class BertForSequenceClassification(BertPreTrainedModel):
def __init__(self, config):
super(BertForSequenceClassification, self).__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.num_labels)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None,
attention_mask=None, labels=None,
output_att=False, output_hidden=False):
sequence_output, att_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,
output_all_encoded_layers=True, output_att=True)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return loss, att_output, sequence_output
else:
return logits, att_output, sequence_output
class BertForQuestionAnswering(BertPreTrainedModel):
r"""
**start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Positions outside of the sequence are not taken into account for computing the loss.
**end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Positions outside of the sequence are not taken into account for computing the loss.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
**start_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``
Span-start scores (before SoftMax).
**end_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``
Span-end scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForQuestionAnswering.from_pretrained('bert-large-uncased-whole-word-masking-finetuned-squad')
question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
input_text = "[CLS] " + question + " [SEP] " + text + " [SEP]"
input_ids = tokenizer.encode(input_text)
token_type_ids = [0 if i <= input_ids.index(102) else 1 for i in range(len(input_ids))]
start_scores, end_scores = model(torch.tensor([input_ids]), token_type_ids=torch.tensor([token_type_ids]))
all_tokens = tokenizer.convert_ids_to_tokens(input_ids)
print(' '.join(all_tokens[torch.argmax(start_scores) : torch.argmax(end_scores)+1]))
# a nice puppet
"""
def __init__(self, config):
super(BertForQuestionAnswering, self).__init__(config)
self.num_labels = 2
self.bert = BertModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, self.num_labels)
self.apply(self.init_bert_weights)
def forward(self, input_ids=None, token_type_ids=None, attention_mask=None,
start_positions=None, end_positions=None):
sequence_output, att_output, _ = self.bert(input_ids, token_type_ids, attention_mask)
logits = self.qa_outputs(sequence_output[-1])
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
if start_positions is not None and end_positions is not None:
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
return total_loss, att_output, sequence_output
return (start_logits, end_logits), att_output, sequence_output
|
bit-main
|
transformer/modeling_bert_quant.py
|
# coding=utf-8
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import logging
import math
class LearnableBias(nn.Module):
def __init__(self, out_chn):
super(LearnableBias, self).__init__()
self.bias = nn.Parameter(torch.zeros(out_chn), requires_grad=True)
def forward(self, x):
out = x + self.bias.expand_as(x)
return out
class ElasticQuantBinarizerSigned(torch.autograd.Function):
"""
Modified from Learned Step-size Quantization.
https://arxiv.org/abs/1902.08153
"""
@staticmethod
def forward(ctx, input, alpha, num_bits, layerwise):
"""
:param input: input to be quantized
:param alpha: the step size
:param num_bits: quantization bits
:param layerwise: whether to use one step size for the whole tensor (row-wise quantization is not implemented)
:return: quantized output
"""
if not layerwise:
# TODO
raise NotImplementedError
ctx.num_bits = num_bits
if num_bits == 32:
return input
if num_bits == 1:
Qn = -1
Qp = 1
else:
Qn = -2 ** (num_bits - 1)
Qp = 2 ** (num_bits - 1) - 1
eps = torch.tensor(0.00001).float().to(alpha.device)
if alpha.item() == 1.0 and (not alpha.initialized):
alpha.initialize_wrapper(input, num_bits, symmetric=True, init_method='default')
alpha = torch.where(alpha > eps, alpha, eps)
assert alpha > 0, 'alpha = {:.6f} becomes non-positive'.format(alpha)
grad_scale = 1.0 / math.sqrt(input.numel()) if not Qp else 1.0 / math.sqrt(input.numel() * Qp)
ctx.save_for_backward(input, alpha)
ctx.other = grad_scale, Qn, Qp
if num_bits == 1:
q_w = input.sign()
else:
q_w = (input / alpha).round().clamp(Qn, Qp)
w_q = q_w * alpha
return w_q
@staticmethod
def backward(ctx, grad_output):
if ctx.num_bits == 32:
return grad_output, None, None, None
input_, alpha = ctx.saved_tensors
grad_scale, Qn, Qp = ctx.other
q_w = input_ / alpha
indicate_small = (q_w < Qn).float()
indicate_big = (q_w > Qp).float()
indicate_middle = 1.0 - indicate_small - indicate_big # this is more cpu-friendly than torch.ones(input_.shape)
if ctx.num_bits == 1:
grad_alpha = ((input_.sign()) * grad_output * grad_scale).sum().unsqueeze(dim=0)
else:
grad_alpha = ((indicate_small * Qn + indicate_big * Qp + indicate_middle * (
-q_w + q_w.round())) * grad_output * grad_scale).sum().unsqueeze(dim=0)
grad_input = indicate_middle * grad_output
return grad_input, grad_alpha, None, None
class ElasticQuantBinarizerUnsigned(torch.autograd.Function):
"""
Modified from Learned Step-size Quantization.
https://arxiv.org/abs/1902.08153
"""
@staticmethod
def forward(ctx, input, alpha, num_bits, layerwise):
"""
:param input: input to be quantized
:param alpha: the step size
:param num_bits: quantization bits
:param layerwise: whether to use one step size for the whole tensor (row-wise quantization is not implemented)
:return: quantized output
"""
if not layerwise:
# TODO
raise NotImplementedError
ctx.num_bits = num_bits
if num_bits == 32:
return input
Qn = 0
Qp = 2 ** (num_bits) - 1
if num_bits == 1:
input_ = input
else:
min_val = input.min().item()
input_ = input - min_val
eps = torch.tensor(0.00001).float().to(alpha.device)
if alpha.item() == 1.0 and (not alpha.initialized):
alpha.initialize_wrapper(input, num_bits, symmetric=False, init_method='default')
alpha = torch.where(alpha > eps, alpha, eps)
assert alpha > 0, 'alpha = {:.6f} becomes non-positive'.format(alpha)
grad_scale = 1.0 / math.sqrt(input.numel() * Qp)
ctx.save_for_backward(input_, alpha)
ctx.other = grad_scale, Qn, Qp
q_w = (input_ / alpha).round().clamp(Qn, Qp)
w_q = q_w * alpha
if num_bits != 1:
w_q = w_q + min_val
return w_q
@staticmethod
def backward(ctx, grad_output):
if ctx.num_bits == 32:
return grad_output, None, None, None
input_, alpha = ctx.saved_tensors
grad_scale, Qn, Qp = ctx.other
q_w = input_ / alpha
indicate_small = (q_w < Qn).float()
indicate_big = (q_w > Qp).float()
indicate_middle = 1.0 - indicate_small - indicate_big # this is more cpu-friendly than torch.ones(input_.shape)
grad_alpha = ((indicate_small * Qn + indicate_big * Qp + indicate_middle * (
-q_w + q_w.round())) * grad_output * grad_scale).sum().unsqueeze(dim=0)
grad_input = indicate_middle * grad_output
return grad_input, grad_alpha, None, None
class AlphaInit(nn.Parameter):
def __init__(self, tensor):
super(AlphaInit, self).__new__(nn.Parameter, data=tensor)
self.initialized = False
def _initialize(self, init_tensor):
assert not self.initialized, 'already initialized.'
self.data.copy_(init_tensor)
self.initialized = True
def initialize_wrapper(self, tensor, num_bits, symmetric, init_method='default'):
Qp = 2 ** (num_bits - 1) - 1 if symmetric else 2 ** (num_bits) - 1
if Qp == 0:
Qp = 1.0
if init_method == 'default':
init_val = 2 * tensor.abs().mean() / math.sqrt(Qp) if symmetric \
else 4 * tensor.abs().mean() / math.sqrt(Qp)
elif init_method == 'uniform':
init_val = 1./(2*Qp+1) if symmetric else 1./Qp
self._initialize(init_val)
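# A minimal sketch (illustrative; not part of the original file): elastic
# signed quantization of a random tensor to 2 bits. The AlphaInit step size
# starts at 1.0 and calibrates itself from the first tensor it quantizes.
if __name__ == "__main__":
    _x = torch.randn(4, 8)
    _alpha = AlphaInit(torch.tensor(1.0))
    _xq = ElasticQuantBinarizerSigned.apply(_x, _alpha, 2, True)
    print(_xq.unique())  # at most 2 ** 2 distinct levels, spaced by alpha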
class BwnQuantizer(torch.autograd.Function):
"""Binary Weight Network (BWN)
Ref: https://arxiv.org/abs/1603.05279
"""
@staticmethod
def forward(ctx, input, clip_val, num_bits, layerwise):
"""
:param input: tensor to be binarized
:return: quantized tensor
"""
ctx.save_for_backward(input)
if layerwise:
s = input.size()
m = input.norm(p=1).div(input.nelement())
e = input.mean()
result = (input-e).sign().mul(m.expand(s))
else:
n = input[0].nelement() # W of size axb, return a vector of ax1
s = input.size()
m = input.norm(1, 1, keepdim=True).div(n)
e = input.mean()
result = (input-e).sign().mul(m.expand(s))
return result
@staticmethod
def backward(ctx, grad_output):
"""
:param ctx: saved non-clipped full-precision tensor and clip_val
:param grad_output: gradient w.r.t. the quantized tensor
:return: estimated gradient wrt the full-precision tensor
"""
grad_input = grad_output.clone()
return grad_input, None, None, None
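# Quick sketch (illustrative): BWN maps every weight to +/- the mean absolute
# deviation around the tensor mean, so a layer-wise binarized tensor holds at
# most two magnitudes.
if __name__ == "__main__":
    _w = torch.randn(3, 5)
    _wb = BwnQuantizer.apply(_w, None, 1, True)  # clip_val is unused by BWN
    print(_wb.unique())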
def act_quant_fn(input, clip_val, num_bits, symmetric, quant_method, layerwise):
if num_bits == 32:
return input
elif quant_method == "bwn" and num_bits == 1:
quant_fn = BwnQuantizer
elif quant_method == "elastic" and num_bits >= 1 and symmetric:
quant_fn = ElasticQuantBinarizerSigned
elif quant_method == "elastic" and num_bits >= 1 and not symmetric:
quant_fn = ElasticQuantBinarizerUnsigned
else:
raise ValueError("Unknownquant_method")
input = quant_fn.apply(input, clip_val, num_bits, layerwise)
return input
def weight_quant_fn(weight, clip_val, num_bits, symmetric, quant_method, layerwise):
if num_bits == 32:
return weight
elif quant_method == "bwn" and num_bits == 1:
quant_fn = BwnQuantizer
else:
raise ValueError("Unknown quant_method")
weight = quant_fn.apply(weight, clip_val, num_bits, layerwise)
return weight
class QuantizeLinear(nn.Linear):
def __init__(self, *kargs, clip_val=2.5, weight_bits=8, input_bits=8, learnable=False, symmetric=True,
weight_layerwise=True, input_layerwise=True, weight_quant_method="twn", input_quant_method="uniform",
**kwargs):
super(QuantizeLinear, self).__init__(*kargs, **kwargs)
self.weight_bits = weight_bits
self.input_bits = input_bits
self.learnable = learnable
self.symmetric = symmetric
self.weight_layerwise = weight_layerwise
self.input_layerwise = input_layerwise
self.weight_quant_method = weight_quant_method
self.input_quant_method = input_quant_method
self._build_weight_clip_val(weight_quant_method, learnable, init_val=clip_val)
self._build_input_clip_val(input_quant_method, learnable, init_val=clip_val)
self.move = LearnableBias(self.weight.shape[1])
def _build_weight_clip_val(self, quant_method, learnable, init_val):
if quant_method == 'uniform':
# init_val = self.weight.mean().item() + 3 * self.weight.std().item()
self.register_buffer('weight_clip_val', torch.tensor([-init_val, init_val]))
if learnable:
self.weight_clip_val = nn.Parameter(self.weight_clip_val)
elif quant_method == 'elastic':
assert learnable, 'Elastic method must use learnable step size!'
self.weight_clip_val = AlphaInit(torch.tensor(1.0)) # stepsize will be initialized in the first quantization
else:
self.register_buffer('weight_clip_val', None)
def _build_input_clip_val(self, quant_method, learnable, init_val):
if quant_method == 'uniform':
self.register_buffer('input_clip_val', torch.tensor([-init_val, init_val]))
if learnable:
self.input_clip_val = nn.Parameter(self.input_clip_val)
elif quant_method == 'elastic' or quant_method == 'bwn':
assert learnable, 'Elastic method must use learnable step size!'
self.input_clip_val = AlphaInit(torch.tensor(1.0)) # stepsize will be initialized in the first quantization
else:
self.register_buffer('input_clip_val', None)
def forward(self, input):
# quantize weight
weight = weight_quant_fn(self.weight, self.weight_clip_val, num_bits=self.weight_bits, symmetric=self.symmetric,
quant_method=self.weight_quant_method, layerwise=self.weight_layerwise)
# quantize input
input = self.move(input)
input = act_quant_fn(input, self.input_clip_val, num_bits=self.input_bits, symmetric=self.symmetric,
quant_method=self.input_quant_method, layerwise=self.input_layerwise)
out = nn.functional.linear(input, weight)
if self.bias is not None:
out += self.bias.view(1, -1).expand_as(out)
return out
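# Usage sketch (hyper-parameters are assumptions, not repo defaults): a
# 1-bit-weight / 8-bit-activation linear layer, the building block swapped in
# for nn.Linear throughout the quantized BERT modules above.
if __name__ == "__main__":
    _layer = QuantizeLinear(16, 4, weight_bits=1, input_bits=8,
                            learnable=True, symmetric=True,
                            weight_quant_method="bwn", input_quant_method="elastic")
    print(_layer(torch.randn(2, 16)).shape)  # torch.Size([2, 4])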
class QuantizeEmbedding(nn.Embedding):
def __init__(self, *kargs, clip_val=2.5, weight_bits=8, learnable=False, symmetric=True,
embed_layerwise=False, weight_quant_method="twn", **kwargs):
super(QuantizeEmbedding, self).__init__(*kargs, **kwargs)
self.weight_bits = weight_bits
self.learnable = learnable
self.symmetric = symmetric
self.embed_layerwise = embed_layerwise
self.weight_quant_method = weight_quant_method
self._build_embed_clip_val(weight_quant_method, learnable, init_val=clip_val)
def _build_embed_clip_val(self, quant_method, learnable, init_val):
if quant_method == 'uniform':
self.register_buffer('embed_clip_val', torch.tensor([-init_val, init_val]))
if learnable:
self.embed_clip_val = nn.Parameter(self.embed_clip_val)
elif quant_method == 'elastic':
assert learnable, 'Elastic method must use learnable step size!'
self.embed_clip_val = AlphaInit(torch.tensor(1.0)) # stepsize will be initialized in the first quantization
else:
self.register_buffer('embed_clip_val', None)
def forward(self, input):
weight = weight_quant_fn(self.weight, self.embed_clip_val, num_bits=self.weight_bits, symmetric=self.symmetric,
quant_method=self.weight_quant_method, layerwise=self.embed_layerwise)
out = nn.functional.embedding(
input, weight, self.padding_idx, self.max_norm,
self.norm_type, self.scale_grad_by_freq, self.sparse)
return out
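# Usage sketch (sizes are illustrative): a binary-weight embedding table of
# the kind used for the word embeddings in the quantized model.
if __name__ == "__main__":
    _emb = QuantizeEmbedding(100, 16, padding_idx=0, weight_bits=1,
                             weight_quant_method="bwn")
    print(_emb(torch.tensor([[1, 2, 3]])).shape)  # torch.Size([1, 3, 16])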
|
bit-main
|
transformer/utils_quant.py
|
# coding=utf-8
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""
import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
from torch.nn.utils import clip_grad_norm_
import logging
import abc
import sys
logger = logging.getLogger(__name__)
if sys.version_info >= (3, 4):
ABC = abc.ABC
else:
ABC = abc.ABCMeta('ABC', (), {})
class _LRSchedule(ABC):
""" Parent of all LRSchedules here. """
warn_t_total = False # is set to True for schedules where progressing beyond t_total steps doesn't make sense
def __init__(self, warmup=0.002, t_total=-1, **kw):
"""
:param warmup: what fraction of t_total steps will be used for linear warmup
:param t_total: how many training steps (updates) are planned
:param kw:
"""
super(_LRSchedule, self).__init__(**kw)
if t_total < 0:
logger.warning("t_total value of {} results in schedule not being applied".format(t_total))
if not 0.0 <= warmup < 1.0 and not warmup == -1:
raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
warmup = max(warmup, 0.)
self.warmup, self.t_total = float(warmup), float(t_total)
self.warned_for_t_total_at_progress = -1
def get_lr(self, step, nowarn=False):
"""
:param step: which of t_total steps we're on
:param nowarn: set to True to suppress warning regarding training beyond specified 't_total' steps
:return: learning rate multiplier for current update
"""
if self.t_total < 0:
return 1.
progress = float(step) / self.t_total
ret = self.get_lr_(progress)
# warning for exceeding t_total (only active for schedules with warn_t_total set)
if not nowarn and self.warn_t_total and progress > 1. and progress > self.warned_for_t_total_at_progress:
logger.warning(
"Training beyond specified 't_total'. Learning rate multiplier set to {}. Please set 't_total' of {} correctly."
.format(ret, self.__class__.__name__))
self.warned_for_t_total_at_progress = progress
# end warning
return ret
@abc.abstractmethod
def get_lr_(self, progress):
"""
:param progress: value between 0 and 1 (unless going beyond t_total steps) specifying training progress
:return: learning rate multiplier for current update
"""
return 1.
class ConstantLR(_LRSchedule):
def get_lr_(self, progress):
return 1.
class WarmupCosineSchedule(_LRSchedule):
"""
Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
Decreases learning rate from 1. to 0. over remaining `1 - warmup` steps following a cosine curve.
If `cycles` (default=0.5) differs from its default, the learning rate follows a cosine function with that many cycles after warmup.
"""
warn_t_total = True
def __init__(self, warmup=0.002, t_total=-1, cycles=.5, **kw):
"""
:param warmup: see LRSchedule
:param t_total: see LRSchedule
:param cycles: number of cycles. Default: 0.5, corresponding to cosine decay from 1. at progress==warmup and 0 at progress==1.
:param kw:
"""
super(WarmupCosineSchedule, self).__init__(warmup=warmup, t_total=t_total, **kw)
self.cycles = cycles
def get_lr_(self, progress):
if progress < self.warmup:
return progress / self.warmup
else:
progress = (progress - self.warmup) / (1 - self.warmup) # progress after warmup
return 0.5 * (1. + math.cos(math.pi * self.cycles * 2 * progress))
class WarmupCosineWithHardRestartsSchedule(WarmupCosineSchedule):
"""
Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
After warmup, the learning rate follows `cycles` (default=1.) cosine decays from 1. to 0.,
restarting hard at 1. at the start of each cycle.
"""
def __init__(self, warmup=0.002, t_total=-1, cycles=1., **kw):
super(WarmupCosineWithHardRestartsSchedule, self).__init__(warmup=warmup, t_total=t_total, cycles=cycles, **kw)
assert(cycles >= 1.)
def get_lr_(self, progress):
if progress < self.warmup:
return progress / self.warmup
else:
progress = (progress - self.warmup) / (1 - self.warmup) # progress after warmup
ret = 0.5 * (1. + math.cos(math.pi * ((self.cycles * progress) % 1)))
return ret
class WarmupCosineWithWarmupRestartsSchedule(WarmupCosineWithHardRestartsSchedule):
"""
All training progress is divided in `cycles` (default=1.) parts of equal length.
Every part follows a schedule with the first `warmup` fraction of the training steps linearly increasing from 0. to 1.,
followed by a learning rate decreasing from 1. to 0. following a cosine curve.
"""
def __init__(self, warmup=0.002, t_total=-1, cycles=1., **kw):
assert(warmup * cycles < 1.)
warmup = warmup * cycles if warmup >= 0 else warmup
super(WarmupCosineWithWarmupRestartsSchedule, self).__init__(warmup=warmup, t_total=t_total, cycles=cycles, **kw)
def get_lr_(self, progress):
progress = progress * self.cycles % 1.
if progress < self.warmup:
return progress / self.warmup
else:
progress = (progress - self.warmup) / (1 - self.warmup) # progress after warmup
ret = 0.5 * (1. + math.cos(math.pi * progress))
return ret
class WarmupConstantSchedule(_LRSchedule):
"""
Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
Keeps learning rate equal to 1. after warmup.
"""
def get_lr_(self, progress):
if progress < self.warmup:
return progress / self.warmup
return 1.
class WarmupLinearSchedule(_LRSchedule):
"""
Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
Linearly decreases learning rate from 1. to 0. over remaining `1 - warmup` steps.
"""
warn_t_total = True
def get_lr_(self, progress):
if progress < self.warmup:
return progress / self.warmup
return max((progress - 1.) / (self.warmup - 1.), 0.)
SCHEDULES = {
None: ConstantLR,
"none": ConstantLR,
"warmup_cosine": WarmupCosineSchedule,
"warmup_constant": WarmupConstantSchedule,
"warmup_linear": WarmupLinearSchedule
}
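# Illustrative check (not in the original file): the warmup_linear multiplier
# ramps from 0 to 1 over the warmup fraction, then decays linearly to 0.
if __name__ == "__main__":
    _sched = SCHEDULES["warmup_linear"](warmup=0.1, t_total=100)
    print([round(_sched.get_lr(_s), 2) for _s in (0, 5, 10, 55, 100)])
    # -> [0.0, 0.5, 1.0, 0.5, 0.0]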
class BertAdam(Optimizer):
"""Implements BERT version of Adam algorithm with weight decay fix.
Params:
lr: learning rate
warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
t_total: total number of training steps for the learning
rate schedule, -1 means constant learning rate of 1. (no warmup regardless of warmup setting). Default: -1
schedule: schedule to use for the warmup (see above).
Can be `'warmup_linear'`, `'warmup_constant'`, `'warmup_cosine'`, `'none'`, `None` or a `_LRSchedule` object (see below).
If `None` or `'none'`, learning rate is always kept constant.
Default : `'warmup_linear'`
b1: Adams b1. Default: 0.9
b2: Adams b2. Default: 0.999
e: Adams epsilon. Default: 1e-6
weight_decay: Weight decay. Default: 0.01
max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
"""
def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear',
b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01, max_grad_norm=1.0, **kwargs):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
if not isinstance(schedule, _LRSchedule) and schedule not in SCHEDULES:
raise ValueError("Invalid schedule parameter: {}".format(schedule))
if not 0.0 <= b1 < 1.0:
raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
if not 0.0 <= b2 < 1.0:
raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
if not e >= 0.0:
raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
# initialize schedule object
if not isinstance(schedule, _LRSchedule):
schedule_type = SCHEDULES[schedule]
schedule = schedule_type(warmup=warmup, t_total=t_total)
else:
if warmup != -1 or t_total != -1:
logger.warning("warmup and t_total on the optimizer are ineffective when _LRSchedule object is provided as schedule. "
"Please specify custom warmup and t_total in _LRSchedule object.")
defaults = dict(lr=lr, schedule=schedule,
b1=b1, b2=b2, e=e, weight_decay=weight_decay,
max_grad_norm=max_grad_norm)
super(BertAdam, self).__init__(params, defaults)
def get_lr(self):
lr = []
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
if len(state) == 0:
return [0]
lr_scheduled = group['lr']
lr_scheduled *= group['schedule'].get_lr(state['step'])
lr.append(lr_scheduled)
return lr
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['next_m'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['next_v'] = torch.zeros_like(p.data)
next_m, next_v = state['next_m'], state['next_v']
beta1, beta2 = group['b1'], group['b2']
# Add grad clipping
if group['max_grad_norm'] > 0:
clip_grad_norm_(p, group['max_grad_norm'])
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
next_m.mul_(beta1).add_(grad, alpha=1 - beta1)
next_v.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
update = next_m / (next_v.sqrt() + group['e'])
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if group['weight_decay'] > 0.0:
update += group['weight_decay'] * p.data
lr_scheduled = group['lr']
lr_scheduled *= group['schedule'].get_lr(state['step'])
update_with_lr = lr_scheduled * update
p.data.add_(-update_with_lr)
state['step'] += 1
# step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1
# No bias correction
# bias_correction1 = 1 - beta1 ** state['step']
# bias_correction2 = 1 - beta2 ** state['step']
return loss
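# Usage sketch (model and hyper-parameters are hypothetical): the customary
# grouping excludes biases and LayerNorm parameters from weight decay.
if __name__ == "__main__":
    _model = torch.nn.Linear(8, 2)
    _no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    _groups = [
        {'params': [p for n, p in _model.named_parameters()
                    if not any(nd in n for nd in _no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in _model.named_parameters()
                    if any(nd in n for nd in _no_decay)], 'weight_decay': 0.0},
    ]
    _opt = BertAdam(_groups, lr=2e-5, warmup=0.1, t_total=1000)
    _model(torch.randn(4, 8)).sum().backward()
    _opt.step()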
|
bit-main
|
transformer/optimization.py
|
# coding=utf-8
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
import os
import unicodedata
from io import open
logger = logging.getLogger(__name__)
PRETRAINED_VOCAB_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt",
}
PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
}
VOCAB_NAME = 'vocab.txt'
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with open(vocab_file, "r", encoding="utf-8") as reader:
while True:
token = reader.readline()
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class BertTokenizer(object):
"""Runs end-to-end tokenization: punctuation splitting + wordpiece"""
def __init__(self, vocab_file, do_lower_case=True, max_len=None, do_basic_tokenize=True, basic_only=False,
never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
"""Constructs a BertTokenizer.
Args:
vocab_file: Path to a one-wordpiece-per-line vocabulary file
do_lower_case: Whether to lower case the input
Only has an effect when do_basic_tokenize=True
do_basic_tokenize: Whether to do basic tokenization before wordpiece.
max_len: An artificial maximum length to truncate tokenized sequences to;
Effective maximum length is always the minimum of this
value (if specified) and the underlying BERT model's
sequence length.
never_split: List of tokens which will never be split during tokenization.
Only has an effect when do_basic_tokenize=True
"""
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
"model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict(
[(ids, tok) for tok, ids in self.vocab.items()])
self.do_basic_tokenize = do_basic_tokenize
if do_basic_tokenize:
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case,
never_split=never_split)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
self.max_len = max_len if max_len is not None else int(1e12)
self.basic_only = basic_only
def tokenize(self, text):
split_tokens = []
if self.do_basic_tokenize:
for token in self.basic_tokenizer.tokenize(text):
if self.basic_only:
split_tokens.append(token)
else:
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
else:
split_tokens = self.wordpiece_tokenizer.tokenize(text)
return split_tokens
def convert_tokens_to_ids(self, tokens):
"""Converts a sequence of tokens into ids using the vocab."""
ids = []
for token in tokens:
ids.append(self.vocab.get(token, self.vocab['[UNK]']))
if len(ids) > self.max_len:
logger.warning(
"Token indices sequence length is longer than the specified maximum "
" sequence length for this BERT model ({} > {}). Running this"
" sequence through BERT will result in indexing errors".format(len(ids), self.max_len)
)
return ids
def convert_ids_to_tokens(self, ids):
"""Converts a sequence of ids in wordpiece tokens using the vocab."""
tokens = []
for i in ids:
tokens.append(self.ids_to_tokens[i])
return tokens
def save_vocabulary(self, vocab_path):
"""Save the tokenizer vocabulary to a directory or file."""
index = 0
if os.path.isdir(vocab_path):
vocab_file = os.path.join(vocab_path, VOCAB_NAME)
with open(vocab_file, "w", encoding="utf-8") as writer:
for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning("Saving vocabulary to {}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!".format(vocab_file))
index = token_index
writer.write(token + u'\n')
index += 1
return vocab_file
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
"""
Instantiate a PreTrainedBertModel from a pre-trained model file.
Download and cache the pre-trained model file if needed.
"""
# assert pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP
resolved_vocab_file = os.path.join(pretrained_model_name_or_path, 'vocab.txt')
max_len = 512
kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
# Instantiate tokenizer.
tokenizer = cls(resolved_vocab_file, *inputs, **kwargs)
return tokenizer
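# End-to-end sketch (toy vocabulary written to a temp file; illustrative
# only): reproduces the "unaffable" example from WordpieceTokenizer below.
if __name__ == "__main__":
    import tempfile
    _vocab = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "un", "##aff", "##able"]
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as _f:
        _f.write("\n".join(_vocab))
    _tok = BertTokenizer(_f.name, do_lower_case=True)
    print(_tok.tokenize("unaffable"))          # ['un', '##aff', '##able']
    print(_tok.convert_tokens_to_ids(["un"]))  # [4]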
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self,
do_lower_case=True,
never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
self.never_split = never_split
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case and token not in self.never_split:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
if text in self.never_split:
return [text]
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
|
bit-main
|
transformer/tokenization.py
|
# coding=utf-8
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import, division, print_function, unicode_literals
import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.autograd import Variable
from torch.nn.parameter import Parameter
from .file_utils import WEIGHTS_NAME, CONFIG_NAME
from .configuration_bert import BertConfig
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz",
}
BERT_CONFIG_NAME = 'bert_config.json'
TF_WEIGHTS_NAME = 'model.ckpt'
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
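# Numeric sanity check (illustrative): the tanh approximation quoted in the
# docstring stays within about 1e-3 of this erf form on a typical range.
if __name__ == "__main__":
    _x = torch.linspace(-3, 3, 7)
    _approx = 0.5 * _x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (_x + 0.044715 * _x ** 3)))
    print((gelu(_x) - _approx).abs().max())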
def swish(x):
return x * torch.sigmoid(x)
try:
from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except ImportError:
logger.info(
"Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .")
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
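# Sanity sketch (illustrative): with fresh weights the fallback implementation
# matches torch.nn.LayerNorm at the same epsilon, up to floating-point error.
if __name__ == "__main__":
    _h = torch.randn(2, 4, 8)
    print((BertLayerNorm(8)(_h) - nn.LayerNorm(8, eps=1e-12)(_h)).abs().max())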
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu}
NORM = {'layer_norm': BertLayerNorm}
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None):
seq_length = input_ids.size(1)
position_ids = torch.arange(
seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(
config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
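# reshape [batch, seq_len, all_head_size] -> [batch, num_heads, seq_len, head_size]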
new_x_shape = x.size()[
:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask, output_att=False):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
# quantize before compute scores
attention_scores = torch.matmul(
query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / \
math.sqrt(self.attention_head_size)
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[
:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer, attention_scores
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, attention_mask):
self_output, layer_att = self.self(input_tensor, attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output, layer_att
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertIntermediate(nn.Module):
def __init__(self, config):
super(BertIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config):
super(BertLayer, self).__init__()
self.attention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask):
attention_output, layer_att = self.attention(
hidden_states, attention_mask)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output, layer_att
class BertEncoder(nn.Module):
def __init__(self, config):
super(BertEncoder, self).__init__()
self.layer = nn.ModuleList([BertLayer(config)
for _ in range(config.num_hidden_layers)])
def forward(self, hidden_states, attention_mask):
all_encoder_layers = []
all_encoder_atts = []
for _, layer_module in enumerate(self.layer):
all_encoder_layers.append(hidden_states)
hidden_states, layer_att = layer_module(
hidden_states, attention_mask)
all_encoder_atts.append(layer_att)
all_encoder_layers.append(hidden_states)
return all_encoder_layers, all_encoder_atts
class BertPooler(nn.Module):
def __init__(self, config, recurs=None):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
self.config = config
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token. "-1" refers to last layer
pooled_output = hidden_states[-1][:, 0]
pooled_output = self.dense(pooled_output)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPreTrainedModel(nn.Module):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(BertPreTrainedModel, self).__init__()
if not isinstance(config, BertConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
"To create a model from a Google pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
self.config = config
def init_bert_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(
mean=0.0, std=self.config.initializer_range)
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@classmethod
def from_scratch(cls, pretrained_model_name_or_path, *inputs, **kwargs):
resolved_config_file = os.path.join(
pretrained_model_name_or_path, CONFIG_NAME)
config = BertConfig.from_json_file(resolved_config_file)
logger.info("Model config {}".format(config))
model = cls(config, *inputs, **kwargs)
return model
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
"""
Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name_or_path: either:
- a str with the name of a pre-trained model to load selected in the list of:
. `bert-base-uncased`
. `bert-large-uncased`
. `bert-base-cased`
. `bert-large-cased`
. `bert-base-multilingual-uncased`
. `bert-base-multilingual-cased`
. `bert-base-chinese`
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `model.chkpt` a TensorFlow checkpoint
from_tf: should we load the weights from a locally saved TensorFlow checkpoint
cache_dir: an optional path to a folder in which the pre-trained models will be cached.
state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
*inputs, **kwargs: additional input for the specific Bert class
(ex: num_labels for BertForSequenceClassification)
"""
state_dict = kwargs.get('state_dict', None)
kwargs.pop('state_dict', None)
from_tf = kwargs.get('from_tf', False)
kwargs.pop('from_tf', None)
config = kwargs.get('config', None)
kwargs.pop('config', None)
if config is None:
# Load config
config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
config = BertConfig.from_json_file(config_file)
logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if state_dict is None and not from_tf:
weights_path = os.path.join(
pretrained_model_name_or_path, WEIGHTS_NAME)
logger.info("Loading model {}".format(weights_path))
state_dict = torch.load(weights_path, map_location='cpu')
if from_tf:
# Directly load from a TensorFlow checkpoint
weights_path = os.path.join(
pretrained_model_name_or_path, TF_WEIGHTS_NAME)
return load_tf_weights_in_bert(model, weights_path)
# Load from a PyTorch state_dict
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(
prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
start_prefix = ''
if not hasattr(model, 'bert') and any(s.startswith('bert.') for s in state_dict.keys()):
start_prefix = 'bert.'
logger.info('loading model...')
load(model, prefix=start_prefix)
logger.info('done!')
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(error_msgs) > 0:
raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
model.__class__.__name__, "\n\t".join(error_msgs)))
return model
class BertModel(BertPreTrainedModel):
def __init__(self, config):
super(BertModel, self).__init__(config)
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None,
output_all_encoded_layers=True, output_att=True):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(
dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
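# For example, a mask row [1, 1, 0] becomes [0.0, 0.0, -10000.0] here, so the
# masked position's attention score is driven to effectively zero probability
# once the softmax in BertSelfAttention is applied.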
embedding_output = self.embeddings(input_ids, token_type_ids)
encoded_layers, layer_atts = self.encoder(embedding_output, extended_attention_mask)
pooled_output = self.pooler(encoded_layers)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
if not output_att:
return encoded_layers, pooled_output
return encoded_layers, layer_atts, pooled_output
class BertForSequenceClassification(BertPreTrainedModel):
def __init__(self, config):
super(BertForSequenceClassification, self).__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.num_labels)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None,
attention_mask=None, labels=None):
sequence_output, att_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return loss, att_output, sequence_output
return logits, att_output, sequence_output
class BertForQuestionAnswering(BertPreTrainedModel):
r"""
**start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Positions outside of the sequence are not taken into account for computing the loss.
**end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Positions outside of the sequence are not taken into account for computing the loss.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
**start_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``
Span-start scores (before SoftMax).
**end_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``
Span-end scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForQuestionAnswering.from_pretrained('bert-large-uncased-whole-word-masking-finetuned-squad')
question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
input_text = "[CLS] " + question + " [SEP] " + text + " [SEP]"
input_ids = tokenizer.encode(input_text)
token_type_ids = [0 if i <= input_ids.index(102) else 1 for i in range(len(input_ids))]
start_scores, end_scores = model(torch.tensor([input_ids]), token_type_ids=torch.tensor([token_type_ids]))
all_tokens = tokenizer.convert_ids_to_tokens(input_ids)
print(' '.join(all_tokens[torch.argmax(start_scores) : torch.argmax(end_scores)+1]))
# a nice puppet
"""
def __init__(self, config):
super(BertForQuestionAnswering, self).__init__(config)
self.num_labels = 2
self.bert = BertModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, self.num_labels)
self.apply(self.init_bert_weights)
def forward(self, input_ids=None, token_type_ids=None, attention_mask=None,
start_positions=None, end_positions=None):
sequence_output, att_output, _ = self.bert(input_ids, token_type_ids, attention_mask)
logits = self.qa_outputs(sequence_output[-1])
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
if start_positions is not None and end_positions is not None:
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
return total_loss, att_output, sequence_output
return (start_logits, end_logits), att_output, sequence_output
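# ---------------------------------------------------------------------------
# A minimal usage sketch for the classes above, assuming a hypothetical local
# checkpoint directory './bert-base-uncased' that contains `config.json` and
# `pytorch_model.bin`, with `num_labels` set in the saved config.
if __name__ == '__main__':
    model = BertForSequenceClassification.from_pretrained('./bert-base-uncased')
    model.eval()
    # A toy batch of two already-tokenized sequences (the ids are placeholders).
    input_ids = torch.tensor([[101, 7592, 2088, 102],
                              [101, 2748, 999, 102]])
    attention_mask = torch.ones_like(input_ids)
    with torch.no_grad():
        logits, att_output, sequence_output = model(
            input_ids, attention_mask=attention_mask)
    print(logits.shape)  # -> torch.Size([2, num_labels])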
|
bit-main
|
transformer/modeling_bert.py
|
# coding=utf-8
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
import json
import logging
import os
import shutil
import tempfile
import fnmatch
from functools import wraps
from hashlib import sha256
import sys
from io import open
import boto3
import requests
from botocore.exceptions import ClientError
from tqdm import tqdm
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from pathlib import Path
PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
Path.home() / '.pytorch_pretrained_bert'))
except (AttributeError, ImportError):
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
os.path.join(os.path.expanduser("~"), '.pytorch_pretrained_bert'))
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "pytorch_model.bin"
MISC_NAME = "misc.bin"
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def url_to_filename(url, etag=None):
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
"""
url_bytes = url.encode('utf-8')
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode('utf-8')
etag_hash = sha256(etag_bytes)
filename += '.' + etag_hash.hexdigest()
return filename
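# Example (illustrative URL): url_to_filename('https://example.com/vocab.txt')
# returns the 64-character sha256 hex digest of the URL; passing an etag
# appends a second digest after a period, i.e. '<url_hash>.<etag_hash>'.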
def filename_to_url(filename, cache_dir=None):
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise EnvironmentError("file {} not found".format(cache_path))
meta_path = cache_path + '.json'
if not os.path.exists(meta_path):
raise EnvironmentError("file {} not found".format(meta_path))
with open(meta_path, encoding="utf-8") as meta_file:
metadata = json.load(meta_file)
url = metadata['url']
etag = metadata['etag']
return url, etag
def cached_path(url_or_filename, cache_dir=None):
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
parsed = urlparse(url_or_filename)
if parsed.scheme in ('http', 'https', 's3'):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == '':
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
def split_s3_path(url):
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
def s3_request(func):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise EnvironmentError("file {} not found".format(url))
else:
raise
return wrapper
@s3_request
def s3_etag(url):
"""Check ETag on S3 object."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@s3_request
def s3_get(url, temp_file):
"""Pull a file directly from S3."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url, temp_file):
req = requests.get(url, stream=True)
content_length = req.headers.get('Content-Length')
total = int(content_length) if content_length is not None else None
progress = tqdm(unit="B", total=total)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(url, cache_dir=None):
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
try:
response = requests.head(url, allow_redirects=True)
if response.status_code != 200:
etag = None
else:
etag = response.headers.get("ETag")
except EnvironmentError:
etag = None
if sys.version_info[0] == 2 and etag is not None:
etag = etag.decode('utf-8')
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
# If we don't have a connection (etag is None) and can't identify the file
# try to get the last downloaded one
if not os.path.exists(cache_path) and etag is None:
matching_files = fnmatch.filter(os.listdir(cache_dir), filename + '.*')
matching_files = list(filter(lambda s: not s.endswith('.json'), matching_files))
if matching_files:
cache_path = os.path.join(cache_dir, matching_files[-1])
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, 'wb') as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {'url': url, 'etag': etag}
meta_path = cache_path + '.json'
with open(meta_path, 'w') as meta_file:
output_string = json.dumps(meta)
if sys.version_info[0] == 2 and isinstance(output_string, str):
output_string = unicode(output_string, 'utf-8') # The beauty of python 2
meta_file.write(output_string)
logger.info("removing temp file %s", temp_file.name)
return cache_path
def read_set_from_file(filename):
'''
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
'''
collection = set()
with open(filename, 'r', encoding='utf-8') as file_:
for line in file_:
collection.add(line.rstrip())
return collection
def get_file_extension(path, dot=True, lower=True):
ext = os.path.splitext(path)[1]
ext = ext if dot else ext[1:]
return ext.lower() if lower else ext
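# A quick sketch of the two small helpers above; 'data/words.txt' is a
# hypothetical one-item-per-line file used purely for illustration.
if __name__ == '__main__':
    print(get_file_extension('data/words.TXT'))                          # '.txt'
    print(get_file_extension('data/words.TXT', dot=False, lower=False))  # 'TXT'
    # vocab = read_set_from_file('data/words.txt')  # -> set of unique lines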
|
bit-main
|
transformer/file_utils.py
|
from IPython.core.display import display_html, HTML, display_javascript, Javascript
import json
import numpy as np
import inspect
import re
from collections import defaultdict
from copy import deepcopy
class TransactionManager:
"""
Class for maintaining a set of n_threads + 1 states
Allows these states to be read and written to by threads, the latter via supplied functions
Commits are also modeled explicitly
Visualization of a supplied set of txns in the txn viewer
"""
def __init__(self, n_threads, initial_main_vals=None):
self.n_threads = n_threads
# The state for each variable is stored as a row
# where indices correspond to thread number, RAM is FIRST element
self._state = defaultdict(lambda : [0]*(self.n_threads + 1))
# Set initial values in global / RAM
if initial_main_vals:
for var, val in initial_main_vals.iteritems():
self._state[var][0] = val
# Start with an initial state
self._log = [{
'thread': -1,
'operation': 'initial',
'state': dict(deepcopy(self._state))
}]
def commit(self, thread):
"""Commit the actions since the last commit"""
# TODO: implement actual functionality!
self._log.append({
'thread': thread,
'operation': 'COMMIT',
'state': dict(deepcopy(self._state))
})
def abort(self, thread):
"""Abort the actions since the last commit"""
# TODO: implement actual functionality!
self._log.append({
'thread': thread,
'operation': 'ABORT',
'state': dict(deepcopy(self._state))
})
def read(self, thread, var):
"""Read var from disk / global into thread local state"""
self._state[var][thread+1] = self._state[var][0]
self._log.append({
'thread': thread,
'operation': 'READ(%s)' % var,
'state': dict(deepcopy(self._state))
})
return self._state[var][thread+1]
def write(self, thread, var, val):
self.write_fn(thread, var, value=val)
def write_fn(self, thread, var, value=None, f=None):
if value is not None and not hasattr(value, '__call__'):  # `is not None` so falsy values like 0 can still be written
val = value
val_string = str(value)
elif f and hasattr(f, '__call__'):
val = f(self._state[var][thread+1])
val_string = lambda_function_repr(f)
else:
raise Exception('f or value must be specified.')
old = self._state[var][0]
self._state[var][0] = val
self._state[var][thread+1] = val # Also set local value
self._log.append({
'thread': thread,
'operation': 'WRITE(%s, %s)' % (var, val_string),
'var': var,
'old': old,
'new': val,
'state': dict(deepcopy(self._state))
})
def print_log(self):
for line in self._log:
print line
def display(self, chart_num=0, configs_in={}):
"""Display the TXN viewer based on full current log"""
# dump input txns to json for transfer to js
with open('txnLog.json', 'wb') as f:
json.dump(self._log, f)
# merge default configs
config = {
'chartNum': chart_num,
'numThreads': self.n_threads
}
config.update(configs_in)
js = ''.join('var %s = %s\n' % (k,v) for k,v in config.iteritems())
# JS
js += open('txnViewer.js', 'rb').read()
js_libs = [
'//d3js.org/d3.v3.min.js',
'https://ajax.googleapis.com/ajax/libs/jquery/2.1.3/jquery.min.js',
'https://ajax.googleapis.com/ajax/libs/jqueryui/1.11.4/jquery-ui.min.js'
]
# HTML
html_scripts = [
'<link rel="stylesheet" href="https://ajax.googleapis.com/ajax/libs/jqueryui/1.11.4/themes/smoothness/jquery-ui.css">'
]
html="""
<head>{0}</head>
<h3>TXN VIEWER</h3>
<table style="border: none; border-collapse: collapse;">
<tr style="border: none;">
<td style="border: none;">
<div id="top-spacer-{1}"></div>
<div id="chart-{1}"></div>
<div id="slider-{1}"></div>
</td>
<td style="vertical-align:top; border: none;">
<table id="vals-{1}"></table>
</td>
</tr>
<tr style="border: none;">
<td colspan="2" style="border: none;"><h4>The Log</h4></td>
</tr>
<tr><td colspan="2"><div id="log-{1}"></td></tr>
</table>
""".format(''.join(html_scripts), chart_num)
# Display in IPython notebook
display_html(HTML(data=html))
display_javascript(Javascript(data=js, lib=js_libs))
def lambda_function_repr(f):
try:
return re.search(r'lambda.*?:(.*?)(,|\)$)', inspect.getsource(f).strip()).group(1).strip()
except AttributeError:
return '?'
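# A minimal sketch of driving TransactionManager (Python 2, like the rest of
# this file); the thread ids, variable name, and values are arbitrary.
if __name__ == '__main__':
    tm = TransactionManager(n_threads=2, initial_main_vals={'A': 10})
    tm.read(0, 'A')       # thread 0 pulls A from RAM into its local state
    tm.write(0, 'A', 15)  # thread 0 writes A (both locally and to RAM)
    tm.commit(0)
    tm.print_log()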
|
cs145-notebooks-2016-master
|
lecture-7-8/txn_viewer.py
|
from collections import namedtuple, OrderedDict
from copy import copy
import numpy as np
from IPython.core.display import display_html, HTML, display_javascript, Javascript
import json
import random
class BufferMemoryException(Exception):
pass
class PageNotFoundException(Exception):
pass
class FileNotFoundException(Exception):
pass
class Page:
def __init__(self, fid, pid, size, data=None):
self.file_id = fid
self.id = pid
self.size = size
if data is None:
self._data = [None]*size
else:
self.set_all(data)
self._i = 0
def size(self, count_empty=False):  # NOTE: shadowed on instances by the `size` attribute set in __init__
return len(filter(lambda e : e or count_empty, self._data))
def get(self, i):
if i < self.size:
return self._data[i]
else:
raise IndexError
def pop(self):
# TODO FINISH THIS!!!
x = self._data[self._i]
self._data[self._i] = None
self._i += 1
return x
def peek(self):
# TODO: Finish this!
return self._data[self._i]
def is_empty(self):
return len([d for d in self._data if d is not None]) == 0
def set(self, i, val):
if i < self.size:
self._data[i] = val
else:
raise IndexError
def set_all(self, data):
if len(data) != self.size:
raise Exception("Data and page size do not match")
else:
self._data = data
def copy(self):
return Page(self.file_id, self.id, self.size, data=copy(self._data))
def get_data_copy(self):
return copy(self._data)
def __iter__(self):
return iter(self._data)
def next(self):
if self._i < self.size:
self._i += 1
return self.get(self._i-1)
else:
raise StopIteration()
def __str__(self):
return "<Page(file_id=%s, id=%s, data=%s)>" % (self.file_id, self.id, self.data)
def __repr__(self):
return self.__str__()
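# Example (illustrative): p = Page(fid=0, pid=0, size=2, data=[7, 9])
# gives p.get(0) -> 7; after p.set(1, 11), p.get_data_copy() -> [7, 11].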
class FileIterator:
"""
Simple class for iterating through a file and reading successive elements of pages
By default, the FileIterator only uses a single page (frame) of the buffer,
And either gets the next element in the page or releases it and reads in a new one from disk
"""
def __init__(self, b, file_id):
self.buffer = b
self.file_id = file_id
self.file_size = len(self.buffer.get_file(file_id))
self.P = self.buffer.page_size
self._page = None
self._p = -1 # The page index
self._e = self.P - 1 # The element index
self._current_value = None
def _next_page(self):
"""Release the current page & get next non-null page, stopping iteration if EOF reached"""
if self._page:
self.buffer.release(self._page)
if self._p < self.file_size - 1:
self._p += 1
self._page = self.buffer.read(self.file_id, self._p)
if self._page is None:
self._next_page()
else:
self._p = -1
self._e = self.P - 1
raise StopIteration
def next(self):
"""
Get next *element* of the pages in the file
Handles reading / flushing pages so that at most one page is in the buffer
"""
# Iterate the element counter and get next page if at end of current one
self._e += 1
if self._e == self.P:
self._e = 0
self._next_page()
# Get the next element
x = self._page.get(self._e)
# Skip None elements by recursing
if x is None:
x = self.next()
self._current_value = x
return x
def get_next(self):
"""Returns None instead of StopIteration exception if EOF reached"""
try:
return self.next()
except StopIteration:
return None
def erase_current(self):
"""
Deletes the current element from page, still storing it in FileIterator
Mostly for animations
"""
self._page.set(self._e, None)
self.buffer.log_buffer_data_diffs()
def peek(self):
"""Returns the current value in the stream without advancing it"""
return self._current_value
def __iter__(self):
return self
class FileWriter:
"""
A simple class for writing to a file
By default the FileWriter only takes up a single page (frame) in the buffer,
flushing to disk when full
"""
def __init__(self, b, file_id):
self.buffer = b
self.file_id = file_id
self.P = self.buffer.page_size
self._page = None
self._i = 0 # The page index
self.pages_written = 0
def append(self, x):
"""Adds an element to the next free slot in a current or new page"""
# Load new page, then set next element
if self._page is None:
self._page = self.buffer.new_page(self.file_id)
self._i = 0
self._page.set(self._i, x)
self._i += 1
# Also log for animation
self.buffer.log_buffer_data_diffs()
# If page is full, flush here
if self._i == self.P:
self.buffer.flush(self._page)
self.pages_written += 1
self._page = None
def close(self):
"""Closes the writer"""
if self._page is not None:
self.buffer.flush(self._page)
self.pages_written += 1
class Buffer:
def __init__(self, page_size=4, buffer_size=4, sequential_cost=1.0, buffer_queue_indicator=None):
self.buffer_size = buffer_size
self.page_size = page_size
# Display tooltip over e.g. LRU, MRU
self.buffer_queue_indicator = buffer_queue_indicator
# The buffer is a hash table of pages, of fixed size
self._buffer = [None]*self.buffer_size
self._buffer_order = []
self._buffer_map = {}
# The disk is a list of files, which are lists of pages
self._disk = []
# Keep track of the last read / write fid,pid for sequential discount
self._last_id = None
self._sequential_cost = sequential_cost
# The log records read & write operations between disk, buffer and main (name??)
self._io_count = {
'bufferReads': 0,
'bufferWrites': 0,
'diskReads': 0,
'diskWrites': 0
}
self._log = []
# for d3 animations
self._chart_num = 0
self._pages_start_state = []
self._diff_log_start = 0
self._buffer_stale = [None]*self.buffer_size
def buffer_is_full(self):
return len(filter(lambda x : x is None, self._buffer)) == 0
def get_empty_buffer_slot(self):
for i in range(len(self._buffer)):
if self._buffer[i] is None:
return i
else:
raise BufferMemoryException
def get_empty_disk_slot(self):
i = -1
for i in range(len(self._disk)):
if self._disk[i] is None:
self._disk[i] = []
return i
self._disk.append([])
return i + 1
def get_file_size(self, file_id, count_empty=False):
return len(filter(lambda p : p or count_empty, self._disk[file_id]))
def _update_log(self, op, page, buffer_idx, old_location, new_location, keep_old, file_id=None, show=True, tooltip_content=None):
fid = page.file_id if page else file_id
pid = page.id if page else None
page_data = page.get_data_copy() if page else None
self._log.append({
"operation": op,
"oldLocation": old_location,
"newLocation": new_location,
"file": fid,
"page": pid,
"bufferIndex": buffer_idx,
"pageData": page_data,
"keepOld": keep_old,
"ioCount": copy(self._io_count),
"show": show,
"tooltipContent": tooltip_content
})
def print_log(self):
for l in self._log:
print '%s : id=(%s,%s) : %s -> %s [bi=%s]' % (l['operation'], l['file'], l['page'], l['oldLocation'], l['newLocation'], l['bufferIndex'])
def get_buffer_page(self, idx):
"""Returns page & buffer index of specific page by buffer order"""
if type(idx) == int:
if idx >= self.buffer_size:
raise BufferMemoryException
else:
if idx == 'LRU':
j = 0
elif idx == 'MRU':
j = -1
else:
raise Exception("Unrecognized index type.")
if len(self._buffer_order) > 0:
i = self._buffer_order[j]
else:
return None, None
return self._buffer[i], i
def update_buffer_queue_indicator(self, remove_idx=None, add_idx=None):
"""
Updates the buffer order queue = tracks order in which pages were put in buffer
Sends tooltip updates to log for a separate tooltip to indicate e.g. LRU/MRU
"""
if remove_idx is not None:
self._buffer_order = filter(lambda i : i != remove_idx, self._buffer_order)
if add_idx is not None:
self._buffer_order.append(add_idx)
if self.buffer_queue_indicator:
page, buffer_idx = self.get_buffer_page(self.buffer_queue_indicator)
self._update_log('TOOLTIP-2', page, buffer_idx, 'BUFFER', 'BUFFER', False, tooltip_content=self.buffer_queue_indicator)
def read(self, fid, pid):
"""
Attempts to read page from buffer, else tries to load a copy of the page from disk
Throws exceptions if page not in buffer and buffer is full or page not found on disk
"""
id = (fid, pid)
self.log_buffer_data_diffs()
# Not in buffer and buffer full!
if id not in self._buffer_map and self.buffer_is_full():
raise BufferMemoryException
# File and/or page not found!
elif fid >= len(self._disk) or pid >= len(self._disk[fid]):
raise PageNotFoundException
else:
# If not already in buffer, read from disk to buffer
# Find an empty slot in the buffer and insert copy of page from disk
if id not in self._buffer_map:
if self._last_id == (id[0], id[1]-1):
self._io_count['diskReads'] += self._sequential_cost
else:
self._io_count['diskReads'] += 1
i = self.get_empty_buffer_slot()
page = self._disk[fid][pid].copy()
self._buffer[i] = page
self._buffer_map[id] = i
self._update_log('READ FROM DISK', page, i, 'DISK', 'BUFFER', True)
# log for sequential discounting
self._last_id = id
# Perform & record read *from* buffer, adjust buffer use order
i = self._buffer_map[id]
self.update_buffer_queue_indicator(remove_idx=i, add_idx=i)
self._io_count['bufferReads'] += 1
page = self._buffer[i]
self._update_log('Read from Buffer', page, i, 'BUFFER', 'BUFFER', False)
return page
def new_page(self, fid):
"""
Creates a new page, in buffer only, returning the page
"""
# Buffer full!
if self.buffer_is_full():
raise BufferMemoryException
# New page must be assigned to a file, and this file must already exist on disk!
elif fid >= len(self._disk):
raise FileNotFoundException
# Create in buffer- log this (mainly for animation)
else:
# Get the next index for the file, append an empty placeholder in the file on disk
# TODO: replace this method, have them do manually?
pid = len(self._disk[fid])
self._disk[fid].append(None)
# Place a new page in the buffer
page = Page(fid, pid, self.page_size)
i = self.get_empty_buffer_slot()
self._buffer[i] = page
self._buffer_stale[i] = page.copy()
self._buffer_map[(fid, pid)] = i
self.update_buffer_queue_indicator(add_idx=i)
self._update_log('Write to Buffer', page, i, None, 'BUFFER', False)
return page
def release(self, page):
"""
Releases page from buffer without flushing to disk, clearing the buffer frame
"""
self.log_buffer_data_diffs()
id = (page.file_id, page.id)
# Must be in buffer!
if id not in self._buffer_map:
raise PageNotFoundException
# Release from buffer without flushing to disk
else:
i = self._buffer_map.pop(id)
self.update_buffer_queue_indicator(remove_idx=i)
self._update_log('RELEASE', page, i, 'BUFFER', None, False)
self._buffer[i] = None
def flush(self, page):
"""
Writes the page to buffer, then flushes the page in buffer to disk, clearing it from buffer
"""
self.log_buffer_data_diffs()
fid = page.file_id
pid = page.id
id = (fid, pid)
# Must be in buffer!
if id not in self._buffer_map:
raise PageNotFoundException
# Must have a file to write to!
elif page.file_id >= len(self._disk):
raise FileNotFoundException
# Flush to disk: remove from buffer, buffer map, place in disk
else:
if self._last_id == (id[0], id[1]-1):
self._io_count['diskWrites'] += self._sequential_cost
else:
self._io_count['diskWrites'] += 1
self._update_log('FLUSH TO DISK', page, self._buffer_map[id], 'BUFFER', 'DISK', False)
i = self._buffer_map.pop(id)
self._disk[fid][pid] = self._buffer[i]
self._buffer[i] = None
self.update_buffer_queue_indicator(remove_idx=i)
# log for sequential discounting
self._last_id = id
def get_file(self, fid):
"""
Gets a file from disk, which is just a list of page ids
"""
if fid >= len(self._disk):
raise FileNotFoundException
else:
return range(len(self._disk[fid]))
def get_file_len(self, fid):
return len(self.get_file(fid))
def new_file(self):
"""
Creates a new file on disk, returns the file id
"""
file_id = self.get_empty_disk_slot()
self._update_log('NEWFILE', None, None, None, None, False, file_id=file_id)
return file_id
def delete_file(self, file_id):
self._disk[file_id] = None
self._update_log('DELETEFILE', None, None, None, None, False, file_id=file_id)
def log_buffer_data_diffs(self):
"""
The user can modify the data in the buffer directly
We want to have a record of these updates though for logging & animation
"""
for i,page in enumerate(self._buffer):
if page is not None:
# Note: only show + log IO count if an actual data change vs. initialization
diff = self._buffer_stale[i] and not np.array_equal(page.get_data_copy(), self._buffer_stale[i].get_data_copy())
if self._buffer_stale[i] is None or diff:
self._update_log('WRITE (Buffer)', page, i, 'BUFFER', 'BUFFER', False, show=False)
self._buffer_stale[i] = page.copy()
if diff:
self._io_count['bufferWrites'] += 1
def display(self, speed=1000, from_start=False, reset_io=False, buffer_num=0):
"""
Display an animation, based on a starting state & the logged diff
Once this is called, the starting state & log mark are advanced
"""
self.log_buffer_data_diffs()
# Create a new html pane with unique id
chart_id = '%s-%s' % (buffer_num, self._chart_num)
html = """
<table>
<tr><th><i>IO Counts</i></th><th>R</th><th>W</th></tr>
<tr>
<td><i>To/from Buffer</i></td>
<td id="chart-{0}-bufferReads">0</td>
<td id="chart-{0}-bufferWrites">0</td>
</tr>
<tr>
<td><b>To/from Disk</b></td>
<td id="chart-{0}-diskReads">0</td>
<td id="chart-{0}-diskWrites">0</td>
</tr>
</table>
<br />
<div class="tooltip" id="chart-{0}-tooltip" style="position:absolute; z-index:100; color:white; background:black; opacity:0.7; padding:3px; border-radius:5px; display:none;">TOOLTIP!</div>
<div class="tooltip" id="chart-{0}-tooltip-2" style="position:absolute; z-index:100; color:white; background:black; opacity:0.7; padding:3px; border-radius:5px; display:none;">TOOLTIP!</div>
<div id="chart-{0}"></div>
""".format(chart_id)
# Dump log to json file
with open('pagesLog.json', 'wb') as f:
json.dump(self._log, f)
# Create animation in js/d3
js_configs = {
'DURATION': speed,
'chartNum': chart_id,
'numBufferPages': self.buffer_size,
'pageSize': self.page_size,
'numDiskPages': 5,
'logStart': self._diff_log_start if not from_start else 0
}
js = js_file_with_configs('compModel.js', js_configs)
js_libs = [
'https://d3js.org/d3.v3.min.js',
'https://ajax.googleapis.com/ajax/libs/jquery/2.1.3/jquery.min.js',
'https://ajax.googleapis.com/ajax/libs/jqueryui/1.11.4/jquery-ui.min.js'
]
display_html(HTML(data=html))
display_javascript(Javascript(data=js, lib=js_libs))
self._chart_num += 1
# Advance current animation state in log
self.display_set_mark(reset_io=reset_io)
def display_set_mark(self, reset_io=True):
"""
Set mark so that next display command starts animation at this point
Also reset IO counter
"""
self._diff_log_start = len(self._log)
if reset_io:
for k in self._io_count.iterkeys():
self._io_count[k] = 0
def js_file_with_configs(fpath, configs):
"""
Take in a js filepath and a dictionary of configs to be passed in as global vars
"""
js = ''
for k,v in configs.iteritems():
if type(v) == str:
js += 'var %s = "%s"\n' % (k,v)
elif type(v) in [int, float]:
js += 'var %s = %s\n' % (k,v)
js += open(fpath, 'rb').read()
return js
def new_rand_file(b, r, l, sorted=False):
vals = random.sample(range(r), l)
if sorted:
vals.sort()
fid = b.new_file()
fw = FileWriter(b, fid)
for v in vals:
fw.append(v)
fw.close()
return fid
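# A minimal sketch of the buffer/file API above (Python 2, matching the file);
# the page/buffer sizes and value range are arbitrary illustration choices.
if __name__ == '__main__':
    b = Buffer(page_size=2, buffer_size=4)
    fid = new_rand_file(b, r=100, l=6, sorted=True)  # 3 sorted pages of 2 ints
    for x in FileIterator(b, fid):
        print x
    b.print_log()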
|
cs145-notebooks-2016-master
|
bonus/io_backend.py
|
from collections import namedtuple, OrderedDict
from copy import copy
import numpy as np
from IPython.core.display import display_html, HTML, display_javascript, Javascript
import json
import random
class BufferMemoryException(Exception):
pass
class PageNotFoundException(Exception):
pass
class FileNotFoundException(Exception):
pass
class Page:
def __init__(self, fid, pid, size, data=None):
self.file_id = fid
self.id = pid
self.size = size
if data is None:
self._data = [None]*size
else:
self.set_all(data)
self._i = 0
def size(self, count_empty=False):  # NOTE: shadowed on instances by the `size` attribute set in __init__
return len(filter(lambda e : e or count_empty, self._data))
def get(self, i):
if i < self.size:
return self._data[i]
else:
raise IndexError
def pop(self):
# TODO FINISH THIS!!!
x = self._data[self._i]
self._data[self._i] = None
self._i += 1
return x
def peek(self):
# TODO: Finish this!
return self._data[self._i]
def is_empty(self):
return len([d for d in self._data if d is not None]) == 0
def set(self, i, val):
if i < self.size:
self._data[i] = val
else:
raise IndexError
def set_all(self, data):
if len(data) != self.size:
raise Exception("Data and page size do not match")
else:
self._data = data
def copy(self):
return Page(self.file_id, self.id, self.size, data=copy(self._data))
def get_data_copy(self):
return copy(self._data)
def __iter__(self):
return iter(self._data)
def next(self):
if self._i < self.size:
self._i += 1
return self.get(self._i-1)
else:
raise StopIteration()
def __str__(self):
return "<Page(file_id=%s, id=%s, data=%s)>" % (self.file_id, self.id, self.data)
def __repr__(self):
return self.__str__()
class FileIterator:
"""
Simple class for iterating through a file and reading successive elements of pages
By default, the FileIterator only uses a single page (frame) of the buffer,
And either gets the next element in the page or releases it and reads in a new one from disk
"""
def __init__(self, b, file_id):
self.buffer = b
self.file_id = file_id
self.file_size = len(self.buffer.get_file(file_id))
self.P = self.buffer.page_size
self._page = None
self._p = -1 # The page index
self._e = self.P - 1 # The element index
self._current_value = None
def _next_page(self):
"""Release the current page & get next non-null page, stopping iteration if EOF reached"""
if self._page:
self.buffer.release(self._page)
if self._p < self.file_size - 1:
self._p += 1
self._page = self.buffer.read(self.file_id, self._p)
if self._page is None:
self._next_page()
else:
self._p = -1
self._e = self.P - 1
raise StopIteration
def next(self):
"""
Get next *element* of the pages in the file
Handles reading / flushing pages so that at most one page is in the buffer
"""
# Iterate the element counter and get next page if at end of current one
self._e += 1
if self._e == self.P:
self._e = 0
self._next_page()
# Get the next element
x = self._page.get(self._e)
# Skip None elements by recursing
if x is None:
x = self.next()
self._current_value = x
return x
def get_next(self):
"""Returns None instead of StopIteration exception if EOF reached"""
try:
return self.next()
except StopIteration:
return None
def erase_current(self):
"""
Deletes the current element from page, still storing it in FileIterator
Mostly for animations
"""
self._page.set(self._e, None)
self.buffer.log_buffer_data_diffs()
def peek(self):
"""Returns the current value in the stream without advancing it"""
return self._current_value
def __iter__(self):
return self
class FileWriter:
"""
A simple class for writing to a file
By default the FileWriter only takes up a single page (frame) in the buffer,
flushing to disk when full
"""
def __init__(self, b, file_id):
self.buffer = b
self.file_id = file_id
self.P = self.buffer.page_size
self._page = None
self._i = 0 # The page index
self.pages_written = 0
def append(self, x):
"""Adds an element to the next free slot in a current or new page"""
# Load new page, then set next element
if self._page is None:
self._page = self.buffer.new_page(self.file_id)
self._i = 0
self._page.set(self._i, x)
self._i += 1
# Also log for animation
self.buffer.log_buffer_data_diffs()
# If page is full, flush here
if self._i == self.P:
self.buffer.flush(self._page)
self.pages_written += 1
self._page = None
def close(self):
"""Closes the writer"""
if self._page is not None:
self.buffer.flush(self._page)
self.pages_written += 1
class Buffer:
def __init__(self, page_size=4, buffer_size=4, sequential_cost=1.0, buffer_queue_indicator=None):
self.buffer_size = buffer_size
self.page_size = page_size
# Display tooltip over e.g. LRU, MRU
self.buffer_queue_indicator = buffer_queue_indicator
# The buffer is a hash table of pages, of fixed size
self._buffer = [None]*self.buffer_size
self._buffer_order = []
self._buffer_map = {}
# The disk is a list of files, which are lists of pages
self._disk = []
# Keep track of the last read / write fid,pid for sequential discount
self._last_id = None
self._sequential_cost = sequential_cost
# The log records read & write operations between disk, buffer and main (name??)
self._io_count = {
'bufferReads': 0,
'bufferWrites': 0,
'diskReads': 0,
'diskWrites': 0
}
self._log = []
# for d3 animations
self._chart_num = 0
self._pages_start_state = []
self._diff_log_start = 0
self._buffer_stale = [None]*self.buffer_size
def buffer_is_full(self):
return len(filter(lambda x : x is None, self._buffer)) == 0
def get_empty_buffer_slot(self):
for i in range(len(self._buffer)):
if self._buffer[i] is None:
return i
else:
raise BufferMemoryException
def get_empty_disk_slot(self):
i = -1
for i in range(len(self._disk)):
if self._disk[i] is None:
self._disk[i] = []
return i
self._disk.append([])
return i + 1
def get_file_size(self, file_id, count_empty=False):
return len(filter(lambda p : p or count_empty, self._disk[file_id]))
def _update_log(self, op, page, buffer_idx, old_location, new_location, keep_old, file_id=None, show=True, tooltip_content=None):
fid = page.file_id if page else file_id
pid = page.id if page else None
page_data = page.get_data_copy() if page else None
self._log.append({
"operation": op,
"oldLocation": old_location,
"newLocation": new_location,
"file": fid,
"page": pid,
"bufferIndex": buffer_idx,
"pageData": page_data,
"keepOld": keep_old,
"ioCount": copy(self._io_count),
"show": show,
"tooltipContent": tooltip_content
})
def print_log(self):
for l in self._log:
print '%s : id=(%s,%s) : %s -> %s [bi=%s]' % (l['operation'], l['file'], l['page'], l['oldLocation'], l['newLocation'], l['bufferIndex'])
def get_buffer_page(self, idx):
"""Returns page & buffer index of specific page by buffer order"""
if type(idx) == int:
if idx >= self.buffer_size:
raise BufferMemoryException
else:
if idx == 'LRU':
j = 0
elif idx == 'MRU':
j = -1
else:
raise Exception("Unrecognized index type.")
if len(self._buffer_order) > 0:
i = self._buffer_order[j]
else:
return None, None
return self._buffer[i], i
def update_buffer_queue_indicator(self, remove_idx=None, add_idx=None):
"""
Updates the buffer order queue = tracks order in which pages were put in buffer
Sends tooltip updates to log for a separate tooltip to indicate e.g. LRU/MRU
"""
if remove_idx is not None:
self._buffer_order = filter(lambda i : i != remove_idx, self._buffer_order)
if add_idx is not None:
self._buffer_order.append(add_idx)
if self.buffer_queue_indicator:
page, buffer_idx = self.get_buffer_page(self.buffer_queue_indicator)
self._update_log('TOOLTIP-2', page, buffer_idx, 'BUFFER', 'BUFFER', False, tooltip_content=self.buffer_queue_indicator)
def read(self, fid, pid):
"""
Attempts to read page from buffer, else tries to load a copy of the page from disk
Throws exceptions if page not in buffer and buffer is full or page not found on disk
"""
id = (fid, pid)
self.log_buffer_data_diffs()
# Not in buffer and buffer full!
if id not in self._buffer_map and self.buffer_is_full():
raise BufferMemoryException
# File and/or page not found!
elif fid >= len(self._disk) or pid >= len(self._disk[fid]):
raise PageNotFoundException
else:
# If not already in buffer, read from disk to buffer
# Find an empty slot in the buffer and insert copy of page from disk
if id not in self._buffer_map:
if self._last_id == (id[0], id[1]-1):
self._io_count['diskReads'] += self._sequential_cost
else:
self._io_count['diskReads'] += 1
i = self.get_empty_buffer_slot()
page = self._disk[fid][pid].copy()
self._buffer[i] = page
self._buffer_map[id] = i
self._update_log('READ FROM DISK', page, i, 'DISK', 'BUFFER', True)
# log for sequential discounting
self._last_id = id
# Perform & record read *from* buffer, adjust buffer use order
i = self._buffer_map[id]
self.update_buffer_queue_indicator(remove_idx=i, add_idx=i)
self._io_count['bufferReads'] += 1
page = self._buffer[i]
self._update_log('Read from Buffer', page, i, 'BUFFER', 'BUFFER', False)
return page
def new_page(self, fid):
"""
Creates a new page, in buffer only, returning the page
"""
# Buffer full!
if self.buffer_is_full():
raise BufferMemoryException
# New page must be assigned to a file, and this file must already exist on disk!
elif fid >= len(self._disk):
raise FileNotFoundException
# Create in buffer- log this (mainly for animation)
else:
# Get the next index for the file, append an empty placeholder in the file on disk
# TODO: replace this method, have them do manually?
pid = len(self._disk[fid])
self._disk[fid].append(None)
# Place a new page in the buffer
page = Page(fid, pid, self.page_size)
i = self.get_empty_buffer_slot()
self._buffer[i] = page
self._buffer_stale[i] = page.copy()
self._buffer_map[(fid, pid)] = i
self.update_buffer_queue_indicator(add_idx=i)
self._update_log('Write to Buffer', page, i, None, 'BUFFER', False)
return page
def release(self, page):
"""
Releases page from buffer without flushing to disk, clearing the buffer frame
"""
self.log_buffer_data_diffs()
id = (page.file_id, page.id)
# Must be in buffer!
if id not in self._buffer_map:
raise PageNotFoundException
# Release from buffer without flushing to disk
else:
i = self._buffer_map.pop(id)
self.update_buffer_queue_indicator(remove_idx=i)
self._update_log('RELEASE', page, i, 'BUFFER', None, False)
self._buffer[i] = None
def flush(self, page):
"""
Writes the page to buffer, then flushes the page in buffer to disk, clearing it from buffer
"""
self.log_buffer_data_diffs()
fid = page.file_id
pid = page.id
id = (fid, pid)
# Must be in buffer!
if id not in self._buffer_map:
raise PageNotFoundException
# Must have a file to write to!
elif page.file_id >= len(self._disk):
raise FileNotFoundException
# Flush to disk: remove from buffer, buffer map, place in disk
else:
if self._last_id == (id[0], id[1]-1):
self._io_count['diskWrites'] += self._sequential_cost
else:
self._io_count['diskWrites'] += 1
self._update_log('FLUSH TO DISK', page, self._buffer_map[id], 'BUFFER', 'DISK', False)
i = self._buffer_map.pop(id)
self._disk[fid][pid] = self._buffer[i]
self._buffer[i] = None
self.update_buffer_queue_indicator(remove_idx=i)
# log for sequential discounting
self._last_id = id
def get_file(self, fid):
"""
Gets a file from disk, which is just a list of page ids
"""
if fid >= len(self._disk):
raise FileNotFoundException
else:
return range(len(self._disk[fid]))
def get_file_len(self, fid):
return len(self.get_file(fid))
def new_file(self):
"""
Creates a new file on disk, returns the file id
"""
file_id = self.get_empty_disk_slot()
self._update_log('NEWFILE', None, None, None, None, False, file_id=file_id)
return file_id
def delete_file(self, file_id):
self._disk[file_id] = None
self._update_log('DELETEFILE', None, None, None, None, False, file_id=file_id)
def log_buffer_data_diffs(self):
"""
The user can modify the data in the buffer directly
We want to have a record of these updates though for logging & animation
"""
for i,page in enumerate(self._buffer):
if page is not None:
# Note: only show + log IO count if an actual data change vs. initialization
diff = self._buffer_stale[i] and not np.array_equal(page.get_data_copy(), self._buffer_stale[i].get_data_copy())
if self._buffer_stale[i] is None or diff:
self._update_log('WRITE (Buffer)', page, i, 'BUFFER', 'BUFFER', False, show=False)
self._buffer_stale[i] = page.copy()
if diff:
self._io_count['bufferWrites'] += 1
def display(self, speed=1000, from_start=False, reset_io=False, buffer_num=0):
"""
Display an animation, based on a starting state & the logged diff
Once this is called, the starting state & log mark are advanced
"""
self.log_buffer_data_diffs()
# Create a new html pane with unique id
chart_id = '%s-%s' % (buffer_num, self._chart_num)
html = """
<table>
<tr><th><i>IO Counts</i></th><th>R</th><th>W</th></tr>
<tr>
<td><i>To/from Buffer</i></td>
<td id="chart-{0}-bufferReads">0</td>
<td id="chart-{0}-bufferWrites">0</td>
</tr>
<tr>
<td><b>To/from Disk</b></td>
<td id="chart-{0}-diskReads">0</td>
<td id="chart-{0}-diskWrites">0</td>
</tr>
</table>
<br />
<div class="tooltip" id="chart-{0}-tooltip" style="position:absolute; z-index:100; color:white; background:black; opacity:0.7; padding:3px; border-radius:5px; display:none;">TOOLTIP!</div>
<div class="tooltip" id="chart-{0}-tooltip-2" style="position:absolute; z-index:100; color:white; background:black; opacity:0.7; padding:3px; border-radius:5px; display:none;">TOOLTIP!</div>
<div id="chart-{0}"></div>
""".format(chart_id)
# Dump log to json file
with open('pagesLog.json', 'wb') as f:
json.dump(self._log, f)
# Create animation in js/d3
js_configs = {
'DURATION': speed,
'chartNum': chart_id,
'numBufferPages': self.buffer_size,
'pageSize': self.page_size,
'numDiskPages': 5,
'logStart': self._diff_log_start if not from_start else 0
}
js = js_file_with_configs('compModel.js', js_configs)
js_libs = [
'https://d3js.org/d3.v3.min.js',
'https://ajax.googleapis.com/ajax/libs/jquery/2.1.3/jquery.min.js',
'https://ajax.googleapis.com/ajax/libs/jqueryui/1.11.4/jquery-ui.min.js'
]
display_html(HTML(data=html))
display_javascript(Javascript(data=js, lib=js_libs))
self._chart_num += 1
# Advance current animation state in log
self.display_set_mark(reset_io=reset_io)
def display_set_mark(self, reset_io=True):
"""
Set mark so that next display command starts animation at this point
Also reset IO counter
"""
self._diff_log_start = len(self._log)
if reset_io:
for k in self._io_count.iterkeys():
self._io_count[k] = 0
def js_file_with_configs(fpath, configs):
"""
Take in a js filepath and a dictionary of configs to be passed in as global vars
"""
js = ''
for k,v in configs.iteritems():
if type(v) == str:
js += 'var %s = "%s"\n' % (k,v)
elif type(v) in [int, float]:
js += 'var %s = %s\n' % (k,v)
js += open(fpath, 'rb').read()
return js
def new_rand_file(b, r, l, sorted=False):
vals = random.sample(range(r), l)
if sorted:
vals.sort()
fid = b.new_file()
fw = FileWriter(b, fid)
for v in vals:
fw.append(v)
fw.close()
return fid
|
cs145-notebooks-2016-master
|
lecture-14-15/io_backend.py
|
from IPython.core.display import display_html, HTML
def to_html_table(res, style=None):
html = '<table' + (' style="' + style + '"' if style else '') + '><tr><th>'
html += '</th><th>'.join(res.keys) + '</th></tr><tr><td>'
html += '</td></tr><tr><td>'.join(['</td><td>'.join([str(cell) for cell in row]) for row in list(res)])
return html + '</td></tr></table>'
def side_by_side(l, r):
s = "display: inline-block;"
html = to_html_table(l, style=s) + ' ' + to_html_table(r, style=s)
display_html(HTML(data=html))
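# Usage sketch (assumption: the `res` arguments expose a `.keys` attribute and
# iterate over rows, as the ipython-sql result sets in these notebooks do):
#   left  = %sql SELECT * FROM A;
#   right = %sql SELECT * FROM B;
#   side_by_side(left, right)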
|
cs145-notebooks-2016-master
|
lecture-14-15/display_tools.py
|
# Utilities
import csv
import numpy as np
from collections import namedtuple
from collections import defaultdict
from collections import Counter
def loadData():
PlayerTeam = namedtuple('PlayerTeam','teamname playername')
PlayerCollege = namedtuple('PlayerCollege', 'playername collegename')
teams = []
for line in csv.reader(open("playerteam.csv", "rb"), delimiter='\t'):
p = PlayerTeam._make(line)
teams.append(p)
colleges = []
for line in csv.reader(open("playercollege.csv", "rb"), delimiter='\t'):
p = PlayerCollege._make(line)
colleges.append(p)
return teams, colleges
def partitionTable(table, hashfunction,buckets):
hRes = defaultdict(list)
for b in range(buckets):
hRes[b] = []
attribute = 'playername'
for s in table:
hRes[hashfunction(getattr(s, attribute),buckets)].append(s)
return hRes
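# A minimal sketch of hash-partitioning the loaded tables; the modulo hash
# below is an illustrative choice, not something defined in this file.
if __name__ == '__main__':
    teams, colleges = loadData()
    h = lambda key, buckets: hash(key) % buckets
    partitions = partitionTable(teams, h, buckets=4)
    for b, rows in partitions.iteritems():  # Python 2, like the csv loading above
        print b, len(rows)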
|
cs145-notebooks-2016-master
|
PS3/nfl.py
|
from collections import namedtuple, OrderedDict
from copy import copy
import numpy as np
from IPython.core.display import display_html, HTML, display_javascript, Javascript
import json
import random
class BufferMemoryException(Exception):
pass
class PageNotFoundException(Exception):
pass
class FileNotFoundException(Exception):
pass
class Page:
def __init__(self, fid, pid, size, data=None):
self.file_id = fid
self.id = pid
self.size = size
if data is None:
self._data = [None]*size
else:
self.set_all(data)
self._i = 0
def size(self, count_empty=False):  # NOTE: shadowed on instances by the `size` attribute set in __init__
return len(filter(lambda e : e or count_empty, self._data))
def get(self, i):
if i < self.size:
return self._data[i]
else:
raise IndexError
def pop(self):
# TODO FINISH THIS!!!
x = self._data[self._i]
self._data[self._i] = None
self._i += 1
return x
def peek(self):
# TODO: Finish this!
return self._data[self._i]
def is_empty(self):
return len([d for d in self._data if d is not None]) == 0
def set(self, i, val):
if i < self.size:
self._data[i] = val
else:
raise IndexError
def set_all(self, data):
if len(data) != self.size:
raise Exception("Data and page size do not match")
else:
self._data = data
def copy(self):
return Page(self.file_id, self.id, self.size, data=copy(self._data))
def get_data_copy(self):
return copy(self._data)
def __iter__(self):
return iter(self._data)
def next(self):
if self._i < self.size:
self._i += 1
return self.get(self._i-1)
else:
raise StopIteration()
def __str__(self):
return "<Page(file_id=%s, id=%s, data=%s)>" % (self.file_id, self.id, self.data)
def __repr__(self):
return self.__str__()
class FileIterator:
"""
Simple class for iterating through a file and reading successive elements of pages
By default, the FileIterator only uses a single page (frame) of the buffer,
And either gets the next element in the page or releases it and reads in a new one from disk
"""
def __init__(self, b, file_id):
self.buffer = b
self.file_id = file_id
self.file_size = len(self.buffer.get_file(file_id))
self.P = self.buffer.page_size
self._page = None
self._p = -1 # The page index
self._e = self.P - 1 # The element index
self._current_value = None
def _next_page(self):
"""Release the current page & get next non-null page, stopping iteration if EOF reached"""
if self._page:
self.buffer.release(self._page)
if self._p < self.file_size - 1:
self._p += 1
self._page = self.buffer.read(self.file_id, self._p)
if self._page is None:
self._next_page()
else:
self._p = -1
self._e = self.P - 1
raise StopIteration
def next(self):
"""
Get next *element* of the pages in the file
Handles reading / flushing pages so that at most one page is in the buffer
"""
# Iterate the element counter and get next page if at end of current one
self._e += 1
if self._e == self.P:
self._e = 0
self._next_page()
# Get the next element
x = self._page.get(self._e)
# Skip None elements by recursing
if x is None:
x = self.next()
self._current_value = x
return x
def get_next(self):
"""Returns None instead of StopIteration exception if EOF reached"""
try:
return self.next()
except StopIteration:
return None
def erase_current(self):
"""
Deletes the current element from page, still storing it in FileIterator
Mostly for animations
"""
self._page.set(self._e, None)
self.buffer.log_buffer_data_diffs()
def peek(self):
"""Returns the current value in the stream without advancing it"""
return self._current_value
def __iter__(self):
return self
class FileWriter:
"""
A simple class for writing to a file
By default the FileWriter only takes up a single page (frame) in the buffer,
flushing to disk when full
"""
def __init__(self, b, file_id):
self.buffer = b
self.file_id = file_id
self.P = self.buffer.page_size
self._page = None
self._i = 0 # The page index
self.pages_written = 0
def append(self, x):
"""Adds an element to the next free slot in a current or new page"""
# Load new page, then set next element
if self._page is None:
self._page = self.buffer.new_page(self.file_id)
self._i = 0
self._page.set(self._i, x)
self._i += 1
# Also log for animation
self.buffer.log_buffer_data_diffs()
# If page is full, flush here
if self._i == self.P:
self.buffer.flush(self._page)
self.pages_written += 1
self._page = None
def close(self):
"""Closes the writer"""
if self._page is not None:
self.buffer.flush(self._page)
self.pages_written += 1
class Buffer:
def __init__(self, page_size=4, buffer_size=4, sequential_cost=1.0, buffer_queue_indicator=None):
self.buffer_size = buffer_size
self.page_size = page_size
# Display tooltip over e.g. LRU, MRU
self.buffer_queue_indicator = buffer_queue_indicator
# The buffer is a hash table of pages, of fixed size
self._buffer = [None]*self.buffer_size
self._buffer_order = []
self._buffer_map = {}
# The disk is a list of files, which are lists of pages
self._disk = []
# Keep track of the last read / write fid,pid for sequential discount
self._last_id = None
self._sequential_cost = sequential_cost
        # The log records read & write operations between disk, buffer, and main memory
self._io_count = {
'bufferReads': 0,
'bufferWrites': 0,
'diskReads': 0,
'diskWrites': 0
}
self._log = []
# for d3 animations
self._chart_num = 0
self._pages_start_state = []
self._diff_log_start = 0
self._buffer_stale = [None]*self.buffer_size
def buffer_is_full(self):
return len(filter(lambda x : x is None, self._buffer)) == 0
def get_empty_buffer_slot(self):
for i in range(len(self._buffer)):
if self._buffer[i] is None:
return i
else:
raise BufferMemoryException
def get_empty_disk_slot(self):
i = -1
for i in range(len(self._disk)):
if self._disk[i] is None:
self._disk[i] = []
return i
self._disk.append([])
return i + 1
def get_file_size(self, file_id, count_empty=False):
return len(filter(lambda p : p or count_empty, self._disk[file_id]))
def _update_log(self, op, page, buffer_idx, old_location, new_location, keep_old, file_id=None, show=True, tooltip_content=None):
fid = page.file_id if page else file_id
pid = page.id if page else None
page_data = page.get_data_copy() if page else None
self._log.append({
"operation": op,
"oldLocation": old_location,
"newLocation": new_location,
"file": fid,
"page": pid,
"bufferIndex": buffer_idx,
"pageData": page_data,
"keepOld": keep_old,
"ioCount": copy(self._io_count),
"show": show,
"tooltipContent": tooltip_content
})
def print_log(self):
for l in self._log:
print '%s : id=(%s,%s) : %s -> %s [bi=%s]' % (l['operation'], l['file'], l['page'], l['oldLocation'], l['newLocation'], l['bufferIndex'])
def get_buffer_page(self, idx):
"""Returns page & buffer index of specific page by buffer order"""
if type(idx) == int:
if idx >= self.buffer_size:
raise BufferMemoryException
else:
if idx == 'LRU':
j = 0
elif idx == 'MRU':
j = -1
else:
raise Exception("Unrecognized index type.")
if len(self._buffer_order) > 0:
i = self._buffer_order[j]
else:
return None, None
return self._buffer[i], i
def update_buffer_queue_indicator(self, remove_idx=None, add_idx=None):
"""
Updates the buffer order queue = tracks order in which pages were put in buffer
Sends tooltip updates to log for a separate tooltip to indicate e.g. LRU/MRU
"""
if remove_idx is not None:
self._buffer_order = filter(lambda i : i != remove_idx, self._buffer_order)
if add_idx is not None:
self._buffer_order.append(add_idx)
if self.buffer_queue_indicator:
page, buffer_idx = self.get_buffer_page(self.buffer_queue_indicator)
self._update_log('TOOLTIP-2', page, buffer_idx, 'BUFFER', 'BUFFER', False, tooltip_content=self.buffer_queue_indicator)
def read(self, fid, pid):
"""
Attempts to read page from buffer, else tries to load a copy of the page from disk
Throws exceptions if page not in buffer and buffer is full or page not found on disk
"""
id = (fid, pid)
self.log_buffer_data_diffs()
# Not in buffer and buffer full!
if id not in self._buffer_map and self.buffer_is_full():
raise BufferMemoryException
# File and/or page not found!
elif fid >= len(self._disk) or pid >= len(self._disk[fid]):
raise PageNotFoundException
else:
# If not already in buffer, read from disk to buffer
# Find an empty slot in the buffer and insert copy of page from disk
if id not in self._buffer_map:
if self._last_id == (id[0], id[1]-1):
self._io_count['diskReads'] += self._sequential_cost
else:
self._io_count['diskReads'] += 1
i = self.get_empty_buffer_slot()
page = self._disk[fid][pid].copy()
self._buffer[i] = page
self._buffer_map[id] = i
self._update_log('READ FROM DISK', page, i, 'DISK', 'BUFFER', True)
# log for sequential discounting
self._last_id = id
# Perform & record read *from* buffer, adjust buffer use order
i = self._buffer_map[id]
self.update_buffer_queue_indicator(remove_idx=i, add_idx=i)
self._io_count['bufferReads'] += 1
page = self._buffer[i]
self._update_log('Read from Buffer', page, i, 'BUFFER', 'BUFFER', False)
return page
def new_page(self, fid):
"""
Creates a new page, in buffer only, returning the page
"""
# Buffer full!
if self.buffer_is_full():
raise BufferMemoryException
# New page must be assigned to a file, and this file must already exist on disk!
elif fid >= len(self._disk):
raise FileNotFoundException
# Create in buffer- log this (mainly for animation)
else:
# Get the next index for the file, append an empty placeholder in the file on disk
# TODO: replace this method, have them do manually?
pid = len(self._disk[fid])
self._disk[fid].append(None)
# Place a new page in the buffer
page = Page(fid, pid, self.page_size)
i = self.get_empty_buffer_slot()
self._buffer[i] = page
self._buffer_stale[i] = page.copy()
self._buffer_map[(fid, pid)] = i
self.update_buffer_queue_indicator(add_idx=i)
self._update_log('Write to Buffer', page, i, None, 'BUFFER', False)
return page
def release(self, page):
"""
Releases page from buffer without flushing to disk, clearing the buffer frame
"""
self.log_buffer_data_diffs()
id = (page.file_id, page.id)
# Must be in buffer!
if id not in self._buffer_map:
raise PageNotFoundException
# Release from buffer without flushing to disk
else:
i = self._buffer_map.pop(id)
self.update_buffer_queue_indicator(remove_idx=i)
self._update_log('RELEASE', page, i, 'BUFFER', None, False)
self._buffer[i] = None
def flush(self, page):
"""
Writes the page to buffer, then flushes the page in buffer to disk, clearing it from buffer
"""
self.log_buffer_data_diffs()
fid = page.file_id
pid = page.id
id = (fid, pid)
# Must be in buffer!
if id not in self._buffer_map:
raise PageNotFoundException
# Must have a file to write to!
elif page.file_id >= len(self._disk):
raise FileNotFoundException
# Flush to disk: remove from buffer, buffer map, place in disk
else:
if self._last_id == (id[0], id[1]-1):
self._io_count['diskWrites'] += self._sequential_cost
else:
self._io_count['diskWrites'] += 1
self._update_log('FLUSH TO DISK', page, self._buffer_map[id], 'BUFFER', 'DISK', False)
i = self._buffer_map.pop(id)
self._disk[fid][pid] = self._buffer[i]
self._buffer[i] = None
self.update_buffer_queue_indicator(remove_idx=i)
# log for sequential discounting
self._last_id = id
def get_file(self, fid):
"""
Gets a file from disk, which is just a list of page ids
"""
if fid >= len(self._disk):
raise FileNotFoundException
else:
return range(len(self._disk[fid]))
def get_file_len(self, fid):
return len(self.get_file(fid))
def new_file(self):
"""
Creates a new file on disk, returns the file id
"""
file_id = self.get_empty_disk_slot()
self._update_log('NEWFILE', None, None, None, None, False, file_id=file_id)
return file_id
def delete_file(self, file_id):
self._disk[file_id] = None
self._update_log('DELETEFILE', None, None, None, None, False, file_id=file_id)
def log_buffer_data_diffs(self):
"""
The user can modify the data in the buffer directly
We want to have a record of these updates though for logging & animation
"""
for i,page in enumerate(self._buffer):
if page is not None:
# Note: only show + log IO count if an actual data change vs. initialization
diff = self._buffer_stale[i] and not np.array_equal(page.get_data_copy(), self._buffer_stale[i].get_data_copy())
if self._buffer_stale[i] is None or diff:
self._update_log('WRITE (Buffer)', page, i, 'BUFFER', 'BUFFER', False, show=False)
self._buffer_stale[i] = page.copy()
if diff:
self._io_count['bufferWrites'] += 1
def display(self, speed=1000, from_start=False, reset_io=False, buffer_num=0):
"""
Display an animation, based on a starting state & the logged diff
Once this is called, the starting state & log mark are advanced
"""
self.log_buffer_data_diffs()
# Create a new html pane with unique id
chart_id = '%s-%s' % (buffer_num, self._chart_num)
html = """
<table>
<tr><th><i>IO Counts</i></th><th>R</th><th>W</th></tr>
<tr>
<td><i>To/from Buffer</i></td>
<td id="chart-{0}-bufferReads">0</td>
<td id="chart-{0}-bufferWrites">0</td>
</tr>
<tr>
<td><b>To/from Disk</b></td>
<td id="chart-{0}-diskReads">0</td>
<td id="chart-{0}-diskWrites">0</td>
</tr>
</table>
<br />
<div class="tooltip" id="chart-{0}-tooltip" style="position:absolute; z-index:100; color:white; background:black; opacity:0.7; padding:3px; border-radius:5px; display:none;">TOOLTIP!</div>
<div class="tooltip" id="chart-{0}-tooltip-2" style="position:absolute; z-index:100; color:white; background:black; opacity:0.7; padding:3px; border-radius:5px; display:none;">TOOLTIP!</div>
<div id="chart-{0}"></div>
""".format(chart_id)
# Dump log to json file
with open('pagesLog.json', 'wb') as f:
json.dump(self._log, f)
# Create animation in js/d3
js_configs = {
'DURATION': speed,
'chartNum': chart_id,
'numBufferPages': self.buffer_size,
'pageSize': self.page_size,
'numDiskPages': 5,
'logStart': self._diff_log_start if not from_start else 0
}
js = js_file_with_configs('compModel.js', js_configs)
js_libs = [
'https://d3js.org/d3.v3.min.js',
'https://ajax.googleapis.com/ajax/libs/jquery/2.1.3/jquery.min.js',
'https://ajax.googleapis.com/ajax/libs/jqueryui/1.11.4/jquery-ui.min.js'
]
display_html(HTML(data=html))
display_javascript(Javascript(data=js, lib=js_libs))
self._chart_num += 1
# Advance current animation state in log
self.display_set_mark(reset_io=reset_io)
def display_set_mark(self, reset_io=True):
"""
Set mark so that next display command starts animation at this point
Also reset IO counter
"""
self._diff_log_start = len(self._log)
if reset_io:
for k in self._io_count.iterkeys():
self._io_count[k] = 0
def js_file_with_configs(fpath, configs):
"""
Take in a js filepath and a dictionary of configs to be passed in as global vars
"""
js = ''
for k,v in configs.iteritems():
if type(v) == str:
js += 'var %s = "%s"\n' % (k,v)
elif type(v) in [int, float]:
js += 'var %s = %s\n' % (k,v)
js += open(fpath, 'rb').read()
return js
def new_rand_file(b, r, l, sorted=False):
vals = random.sample(range(r), l)
if sorted:
vals.sort()
fid = b.new_file()
fw = FileWriter(b, fid)
for v in vals:
fw.append(v)
fw.close()
return fid
|
cs145-notebooks-2016-master
|
PS3/io_backend.py
|
from IPython.core.display import display_html, HTML
def to_html_table(res, style=None):
html = '<table' + (' style="' + style + '"' if style else '') + '><tr><th>'
html += '</th><th>'.join(res.keys) + '</th></tr><tr><td>'
html += '</td></tr><tr><td>'.join(['</td><td>'.join([str(cell) for cell in row]) for row in list(res)])
return html + '</tr></table>'
def side_by_side(l, r):
s = "display: inline-block;"
html = to_html_table(l, style=s) + ' ' + to_html_table(r, style=s)
display_html(HTML(data=html))
|
cs145-notebooks-2016-master
|
lecture-16/display_tools.py
|
# TODO:
# 1. Remove asserts and replace with exceptions.
# 2. There is a lot of unpythonic code in here, someone who likes python can fix it :)
def get_result(x): return [tuple(t) for t in x]
def generate_dict(t, schema_index):
d = dict()
for x in schema_index.keys():
d[x] = t[schema_index[x]]
return tuple(sorted(list(d.iteritems())))
# Schema-aware comparison
def compare_results(x,y):
    # First check that the results have the same schema (as a set of attributes).
    if set(x.schema) != set(y.schema): return False
# Now, we want to compare them as sets but the attribute orders may be different,
# so we turn each tuple into a dictionary
xd = map(lambda t : generate_dict(t, x.schema_index),get_result(x))
yd = map(lambda t : generate_dict(t, y.schema_index),get_result(y))
return set(xd) == set(yd)
class OpBase:
def __init__(self, schema, children):
self.schema = list(schema)
self.schema_index = dict([(b,a) for (a,b) in enumerate(self.schema)])
self.in_count = 0
self.out_count = 0
self.children = children
#self.count_reads = True
#self.op_str = None
def __repr__(self): return "{0}".format(self.schema)
# Get an index for an attribute
def resolve_attribute(self, x):
if x not in self.schema:
raise NameError("{0} not found in {1}".format(x,self.schema))
return self.schema_index[x]
# helper function to resolve many attributes
def resolve_attributes(self, attr):
return [self.resolve_attribute(x) for x in attr]
# Code for counting the number of tuples "flowing by"
def reset_count(self):
self.in_count = 0
self.out_count = 0
for c in self.children: c.reset_count()
def total_count(self):
return self.in_count + sum([c.total_count() for c in self.children if c.count_reads])
def count_str(self, offset):
out = " "*(4*offset) + "*" + " {0} ".format(self.op_str)
if self.count_reads:
out += "[tuples read in: {0} out: {1}]".format(self.in_count, self.out_count)
return out + "\n"
def toCount(self, offset=0):
return self.count_str(offset) + ''.join([c.toCount(offset+1) for c in self.children])
# This takes in a relation with an optional name.
# All operators yield a set of tuples and define a schema
class BaseRelation(OpBase):
def __init__(self, res, name=None):
self.res = res
self.name = name
self.count_reads = False
OpBase.__init__(self, res.keys, [])
self.op_str = "{0}({1}) has {2} tuples".format(self.name, ','.join(self.schema), len(self.res))
def __iter__(self):
for r in self.res:
self.in_count += 1
self.out_count += 1
yield r
def toMD(self):
return "{0}({1})".format(self.name, ','.join(self.schema))
class Select(OpBase):
"""Selection attr=val"""
def __init__(self, attr,val,op):
self.in_op = op
self.attr = attr
self.v = val
in_schema = self.in_op.schema
self.op_str = "$\sigma_{{{0}={1}}}$".format(self.attr, self.v)
        assert(attr in in_schema) # TODO: replace with an exception!
OpBase.__init__(self, in_schema, [self.in_op]) # Schema Unchanged
self.count_reads = True
def __iter__(self):
idx = self.in_op.resolve_attribute(self.attr)
for r in self.in_op:
self.in_count += 1
if r[idx] == self.v:
self.out_count += 1
yield r
def toMD(self):
return "$\sigma_{{{0}={1}}}$({2})".format(self.attr, self.v, self.in_op.toMD())
class Project(OpBase):
"""Projection."""
def __init__(self, attributes, op):
self.attributes = attributes
self.in_op = op
self.op_str = "$\Pi_{{{0}}}$".format(self.attributes)
assert(all([x in self.in_op.schema for x in attributes])) # TODO: replace with an exception
OpBase.__init__(self, self.attributes, [self.in_op]) # Schema changes!
self.count_reads = True
def project_helper(self, idx_list, t):
return tuple([t[i] for i in idx_list])
def __iter__(self):
idx_list = self.in_op.resolve_attributes(self.attributes)
# Remove duplicates
in_e = [self.project_helper(idx_list,t) for t in self.in_op]
self.in_count += len(in_e)
for u in set(in_e):
self.out_count += 1
yield u
def toMD(self):
return "$\Pi_{{{0}}}$({1})".format(','.join(self.attributes), self.in_op.toMD())
class CrossProduct(OpBase):
"""Cross Product"""
def __init__(self, op1, op2):
self.l = op1
self.r = op2
s1 = set(op1.schema)
s2 = set(op2.schema)
self.op_str = "$\times$"
# Make sure the schemas are distinct
if len(s1.intersection(s2)) != 0:
raise ValueError("Schemas must be distinct!")
OpBase.__init__(self, s1.union(s2), [op1,op2]) # Schema changes!
self.count_reads = True
def __iter__(self):
for x in self.l:
self.in_count += 1
for y in self.r:
self.in_count += 1
self.out_count += 1
yield tuple(x) + tuple(y)
def toMD(self):
return "{0} $\\times$ {1}".format(self.l.toMD(), self.r.toMD())
class NJoin(OpBase):
"""Natural Join"""
def __init__(self, op1, op2):
self.l = op1
self.r = op2
s1 = set(op1.schema)
s2 = set(op2.schema)
self.common = s1.intersection(s2)
self.op_str = "$\Join_{{{0}}}$".format(','.join(self.common))
OpBase.__init__(self, op1.schema + filter(lambda x : x not in self.common, op2.schema), [op1,op2])
self.count_reads = True
def __iter__(self):
# return common attributes in index-order of the *left* relation
idx_cl = sorted(self.l.resolve_attributes(self.common))
common_left_order = [c for i,c in sorted([(self.l.resolve_attribute(c),c) for c in self.common])]
idx_cr = self.r.resolve_attributes(common_left_order)
# return the attributes unique to the right relation in the *right* relation's order
idx_r = sorted(self.r.resolve_attributes(set(self.r.schema).difference(self.common)))
for x in self.l:
self.in_count += 1
for y in self.r:
self.in_count += 1
if all([x[idx_cl[i]] == y[idx_cr[i]] for i in range(len(self.common))]):
self.out_count += 1
ty = tuple([y[i] for i in idx_r])
yield tuple(x) + tuple(ty)
def toMD(self):
return "( {0} ) $\Join_{{{2}}}$ ( {1} )".format(self.l.toMD(), self.r.toMD(), ','.join(self.common))
class Union(OpBase):
"""Union"""
def __init__(self, op1, op2):
self.l = op1
self.r = op2
self.op_str = "$\\bigcup$"
assert(op1.schema == op2.schema)
OpBase.__init__(self, op1.schema, [op1,op2])
self.count_reads = True
def __iter__(self):
ll = get_result(self.l)
rl = get_result(self.r)
self.in_count += len(ll)
self.in_count += len(rl)
ls = set(ll)
rs = set(rl)
for x in ls.union(rs):
self.out_count += 1
yield x
def toMD(self):
return "( {0} ) $\\bigcup$ ( {1} )".format(self.l.toMD(), self.r.toMD())
class Difference(OpBase):
"""Difference"""
def __init__(self, op1, op2):
self.l = op1
self.r = op2
self.op_str = "-"
assert(op1.schema == op2.schema)
OpBase.__init__(self, op1.schema, [op1,op2])
self.count_reads = True
def __iter__(self):
ll = get_result(self.l)
rl = get_result(self.r)
self.in_count += len(ll)
self.in_count += len(rl)
ls = set(ll)
rs = set(rl)
for x in ls.difference(rs):
self.out_count += 1
yield x
def toMD(self):
return "( {0} ) - ( {1} )".format(self.l.toMD(), self.r.toMD())
|
cs145-notebooks-2016-master
|
lecture-16/relation_algebra.py
|
from IPython.core.display import display_html, HTML
def to_html_table(res, style=None):
html = '<table' + (' style="' + style + '"' if style else '') + '><tr><th>'
html += '</th><th>'.join(res.keys) + '</th></tr><tr><td>'
html += '</td></tr><tr><td>'.join(['</td><td>'.join([str(cell) for cell in row]) for row in list(res)])
    return html + '</td></tr></table>'
def side_by_side(l, r):
s = "display: inline-block;"
html = to_html_table(l, style=s) + ' ' + to_html_table(r, style=s)
display_html(HTML(data=html))
|
cs145-notebooks-2016-master
|
lecture-17/display_tools.py
|
# TODO:
# 1. Remove asserts and replace with exceptions.
# 2. There is a lot of unpythonic code in here, someone who likes python can fix it :)
def get_result(x): return [tuple(t) for t in x]
def generate_dict(t, schema_index):
d = dict()
for x in schema_index.keys():
d[x] = t[schema_index[x]]
return tuple(sorted(list(d.iteritems())))
# Schema-aware comparison
def compare_results(x,y):
    # First check that the results have the same schema (as a set of attributes).
    if set(x.schema) != set(y.schema): return False
# Now, we want to compare them as sets but the attribute orders may be different,
# so we turn each tuple into a dictionary
xd = map(lambda t : generate_dict(t, x.schema_index),get_result(x))
yd = map(lambda t : generate_dict(t, y.schema_index),get_result(y))
return set(xd) == set(yd)
class OpBase:
def __init__(self, schema, children):
self.schema = list(schema)
self.schema_index = dict([(b,a) for (a,b) in enumerate(self.schema)])
self.in_count = 0
self.out_count = 0
self.children = children
#self.count_reads = True
#self.op_str = None
def __repr__(self): return "{0}".format(self.schema)
# Get an index for an attribute
def resolve_attribute(self, x):
if x not in self.schema:
raise NameError("{0} not found in {1}".format(x,self.schema))
return self.schema_index[x]
# helper function to resolve many attributes
def resolve_attributes(self, attr):
return [self.resolve_attribute(x) for x in attr]
# Code for counting the number of tuples "flowing by"
def reset_count(self):
self.in_count = 0
self.out_count = 0
for c in self.children: c.reset_count()
def total_count(self):
return self.in_count + sum([c.total_count() for c in self.children if c.count_reads])
def count_str(self, offset):
out = " "*(4*offset) + "*" + " {0} ".format(self.op_str)
if self.count_reads:
out += "[tuples read in: {0} out: {1}]".format(self.in_count, self.out_count)
return out + "\n"
def toCount(self, offset=0):
return self.count_str(offset) + ''.join([c.toCount(offset+1) for c in self.children])
# This takes in a relation with an optional name.
# All operators yield a set of tuples and define a schema
class BaseRelation(OpBase):
def __init__(self, res, name=None):
self.res = res
self.name = name
self.count_reads = False
OpBase.__init__(self, res.keys, [])
self.op_str = "{0}({1}) has {2} tuples".format(self.name, ','.join(self.schema), len(self.res))
def __iter__(self):
for r in self.res:
self.in_count += 1
self.out_count += 1
yield r
def toMD(self):
return "{0}({1})".format(self.name, ','.join(self.schema))
class Select(OpBase):
"""Selection attr=val"""
def __init__(self, attr,val,op):
self.in_op = op
self.attr = attr
self.v = val
in_schema = self.in_op.schema
self.op_str = "$\sigma_{{{0}={1}}}$".format(self.attr, self.v)
        assert(attr in in_schema) # TODO: replace with an exception!
OpBase.__init__(self, in_schema, [self.in_op]) # Schema Unchanged
self.count_reads = True
def __iter__(self):
idx = self.in_op.resolve_attribute(self.attr)
for r in self.in_op:
self.in_count += 1
if r[idx] == self.v:
self.out_count += 1
yield r
def toMD(self):
return "$\sigma_{{{0}={1}}}$({2})".format(self.attr, self.v, self.in_op.toMD())
class Project(OpBase):
"""Projection."""
def __init__(self, attributes, op):
self.attributes = attributes
self.in_op = op
self.op_str = "$\Pi_{{{0}}}$".format(self.attributes)
assert(all([x in self.in_op.schema for x in attributes])) # TODO: replace with an exception
OpBase.__init__(self, self.attributes, [self.in_op]) # Schema changes!
self.count_reads = True
def project_helper(self, idx_list, t):
return tuple([t[i] for i in idx_list])
def __iter__(self):
idx_list = self.in_op.resolve_attributes(self.attributes)
# Remove duplicates
in_e = [self.project_helper(idx_list,t) for t in self.in_op]
self.in_count += len(in_e)
for u in set(in_e):
self.out_count += 1
yield u
def toMD(self):
return "$\Pi_{{{0}}}$({1})".format(','.join(self.attributes), self.in_op.toMD())
class CrossProduct(OpBase):
"""Cross Product"""
def __init__(self, op1, op2):
self.l = op1
self.r = op2
s1 = set(op1.schema)
s2 = set(op2.schema)
self.op_str = "$\times$"
# Make sure the schemas are distinct
if len(s1.intersection(s2)) != 0:
raise ValueError("Schemas must be distinct!")
OpBase.__init__(self, s1.union(s2), [op1,op2]) # Schema changes!
self.count_reads = True
def __iter__(self):
for x in self.l:
self.in_count += 1
for y in self.r:
self.in_count += 1
self.out_count += 1
yield tuple(x) + tuple(y)
def toMD(self):
return "{0} $\\times$ {1}".format(self.l.toMD(), self.r.toMD())
class NJoin(OpBase):
"""Natural Join"""
def __init__(self, op1, op2):
self.l = op1
self.r = op2
s1 = set(op1.schema)
s2 = set(op2.schema)
self.common = s1.intersection(s2)
self.op_str = "$\Join_{{{0}}}$".format(','.join(self.common))
OpBase.__init__(self, op1.schema + filter(lambda x : x not in self.common, op2.schema), [op1,op2])
self.count_reads = True
def __iter__(self):
# return common attributes in index-order of the *left* relation
idx_cl = sorted(self.l.resolve_attributes(self.common))
common_left_order = [c for i,c in sorted([(self.l.resolve_attribute(c),c) for c in self.common])]
idx_cr = self.r.resolve_attributes(common_left_order)
# return the attributes unique to the right relation in the *right* relation's order
idx_r = sorted(self.r.resolve_attributes(set(self.r.schema).difference(self.common)))
for x in self.l:
self.in_count += 1
for y in self.r:
self.in_count += 1
if all([x[idx_cl[i]] == y[idx_cr[i]] for i in range(len(self.common))]):
self.out_count += 1
ty = tuple([y[i] for i in idx_r])
yield tuple(x) + tuple(ty)
def toMD(self):
return "( {0} ) $\Join_{{{2}}}$ ( {1} )".format(self.l.toMD(), self.r.toMD(), ','.join(self.common))
class Union(OpBase):
"""Union"""
def __init__(self, op1, op2):
self.l = op1
self.r = op2
self.op_str = "$\\bigcup$"
assert(op1.schema == op2.schema)
OpBase.__init__(self, op1.schema, [op1,op2])
self.count_reads = True
def __iter__(self):
ll = get_result(self.l)
rl = get_result(self.r)
self.in_count += len(ll)
self.in_count += len(rl)
ls = set(ll)
rs = set(rl)
for x in ls.union(rs):
self.out_count += 1
yield x
def toMD(self):
return "( {0} ) $\\bigcup$ ( {1} )".format(self.l.toMD(), self.r.toMD())
class Difference(OpBase):
"""Difference"""
def __init__(self, op1, op2):
self.l = op1
self.r = op2
self.op_str = "-"
assert(op1.schema == op2.schema)
OpBase.__init__(self, op1.schema, [op1,op2])
self.count_reads = True
def __iter__(self):
ll = get_result(self.l)
rl = get_result(self.r)
self.in_count += len(ll)
self.in_count += len(rl)
ls = set(ll)
rs = set(rl)
for x in ls.difference(rs):
self.out_count += 1
yield x
def toMD(self):
return "( {0} ) - ( {1} )".format(self.l.toMD(), self.r.toMD())
|
cs145-notebooks-2016-master
|
lecture-17/relation_algebra.py
|
# A = set(["name", "category"]) # These are the attribute set.
# fds = [ (set(["name"]),"color"),
# (set(["category"]), "department"),
# (set(["color", "category"]), "price") ]
def to_set(x):
"""Convert input int, string, list, tuple, set -> set"""
if type(x) == set:
return x
    elif type(x) in [list, tuple]:
return set(x)
elif type(x) in [str, int]:
return set([x])
else:
raise Exception("Unrecognized type.")
def fd_to_str((lhs,rhs)): return ",".join(to_set(lhs)) + " -> " + ",".join(to_set(rhs))
def fds_to_str(fds): return "\n\t".join(map(fd_to_str, fds))
def set_to_str(x): return "{" + ",".join(x) + "}"
def fd_applies_to(fd, x):
lhs, rhs = map(to_set, fd)
return lhs.issubset(x)
def print_setup(A, fds):
print("Attributes = " + set_to_str(A))
print("FDs = \t" + fds_to_str(fds))
"""Does the FD apply"""
def fd_applies(x, (lhs,rhs)): return to_set(lhs).issubset(x)
def compute_closure(x, fds, verbose=False):
bChanged = True # We will repeat until there are no changes.
x_ret = x.copy() # Make a copy of the input to hold x^{+}
while bChanged:
bChanged = False # Must change on each iteration
for fd in fds: # loop through all the FDs.
(lhs, rhs) = map(to_set, fd) # recall: lhs -> rhs
if fd_applies_to(fd, x_ret) and not rhs.issubset(x_ret):
x_ret = x_ret.union(rhs)
if verbose:
print("Using FD " + fd_to_str(fd))
print("\t Updated x to " + set_to_str(x_ret))
bChanged = True
return x_ret
def is_fd_implied(fds, lhs, rhs, verbose=False):
"""Check if lhs -> rhs is implied by the given set of fds"""
xp = compute_closure(lhs,fds,verbose=verbose)
if verbose: print(set_to_str(lhs) +"+ = "+ set_to_str(xp))
return to_set(rhs).issubset(xp)
def is_superkey(A,B,fds, verbose=False):
"""Check if A is a super key in B according to the fds"""
return is_fd_implied(fds, A, B)
import itertools
def is_key(A,B,fds,verbose=False):
"""Check if A is a key in B wrt to fds"""
m=len(A)
subsets = set(itertools.combinations(A, m-1))
return is_superkey(A,B,fds) and all(not is_superkey(set(SA),B,fds) for SA in subsets)
#
# Key example from lecture
#
def key_example():
xmC=set(["A","B"])
xmB=set(["A","C"])
xmA=set(["B","C"])
B =set(["A","B","C"])
fd1=(xmC,"C"); fd2=(xmB,"B"); fd3=(xmA,"A")
fds=[fd1,fd2,fd3]
return is_key(xmA,B,fds) and is_key(xmB,B,fds) and is_key(xmC,B,fds)
from IPython.core.display import display_html, HTML
def to_html_table(res, style=None):
html = '<table' + (' style="' + style + '"' if style else '') + '><tr><th>'
html += '</th><th>'.join(res.keys) + '</th></tr><tr><td>'
html += '</td></tr><tr><td>'.join(['</td><td>'.join([str(cell) for cell in row]) for row in list(res)])
    return html + '</td></tr></table>'
def display_side_by_side(l, r):
s = "display: inline-block;"
html = to_html_table(l, style=s) + ' ' + to_html_table(r, style=s)
display_html(HTML(data=html))
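# Illustrative sketch: run the closure computation on the example FDs from the
# header comment (name -> color, category -> department,
# {color, category} -> price) and check that {name, category} is a key.
def example_closure():
    A = set(["name", "category"])
    fds = [(set(["name"]), "color"),
           (set(["category"]), "department"),
           (set(["color", "category"]), "price")]
    print_setup(A, fds)
    print(set_to_str(compute_closure(A, fds, verbose=True)))  # all five attributes
    B = set(["name", "category", "color", "department", "price"])
    print(is_key(A, B, fds))  # True: A determines B and no proper subset does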
|
cs145-notebooks-2016-master
|
lecture-5-6/closure.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Optional
from overrides import overrides
from allennlp.training.metrics.metric import Metric
import torch
import numpy as np
from curiosity.util import get_logger
log = get_logger(__name__)
@Metric.register("mean_reciprocal_rank")
class MeanReciprocalRank(Metric):
def __init__(self):
self._reciprocal_ranks = []
def __call__(self, logits: torch.Tensor, labels: torch.Tensor, mask: torch.Tensor):
"""
logits and labels should be the same shape. Labels should be
an array of 0/1s to indicate if the document is relevant.
We don't need a mask here since we select nonzero labels and
masked entries in labels are never equal to 1 (Pedro is pretty sure)
"""
        n_relevant = labels.sum().item()
        if n_relevant == 0:
            # None are relevant, no-op
return
preds = logits.argsort(dim=-1, descending=True)
# nonzeros occur where there are predictions to make
# (n_nonzero, 3)
# 3 = dims for batch, turn and fact
indices = labels.nonzero()
# TODO: This could be batched, but its a pain
all_ranks = []
# import ipdb; ipdb.set_trace()
for batch_idx, turn_idx, fact_idx in indices:
# List of predictions, first element is index
# of top ranked document, second of second-top, etc
inst_preds = preds[batch_idx, turn_idx]
rank = (inst_preds == fact_idx).nonzero().reshape(-1)
all_ranks.append(rank)
all_ranks = torch.cat(all_ranks)
        # rank starts at zero in torch; add 1 before taking the reciprocal
reciprocal_ranks = 1 / (1 + all_ranks).float()
self._reciprocal_ranks.extend(reciprocal_ranks.cpu().numpy().tolist())
return reciprocal_ranks.mean()
@overrides
def get_metric(self, reset: bool = False) -> float:
if len(self._reciprocal_ranks) == 0:
log.warn("Taking MRR of zero length list")
mrr = 0.0
else:
mrr = np.array(self._reciprocal_ranks).mean()
if reset:
self.reset()
return mrr
@overrides
def reset(self):
self._reciprocal_ranks = []
@Metric.register("multilabel_micro_precision")
class MultilabelMicroF1(Metric):
"""
For a problem of (batch_size, *, n_classes) that is multilabel, compute the
precision, recall, F1 and take the average.
This is the micro average since each prediction
bumps the weight by one, whereas the macro average would compute accuracy by
class, then average the classes
This assumes that each class logit represents a binary cross entropy problem
"""
def __init__(self) -> None:
self._precision_correct_count = 0.0
self._precision_total_count = 0.0
self._recall_correct_count = 0.0
self._recall_total_count = 0.0
def __call__(
self,
predictions: torch.Tensor,
gold_labels: torch.Tensor,
mask: Optional[torch.Tensor] = None,
):
"""
Parameters
----------
predictions : ``torch.Tensor``, required.
A tensor of predictions of shape (batch_size, ...).
gold_labels : ``torch.Tensor``, required.
A tensor of the same shape as ``predictions``.
mask: ``torch.Tensor``, optional (default = None).
A tensor of the same shape as ``predictions``.
"""
predictions, gold_labels, mask = self.unwrap_to_tensors(
predictions, gold_labels, mask
)
# Some sanity checks.
if gold_labels.size() != predictions.size():
raise ValueError(
f"gold_labels must have shape == predictions.size() but "
f"found tensor of shape: {gold_labels.size()}"
)
if mask is not None and mask.size() != predictions.size():
raise ValueError(
f"mask must have shape == predictions.size() but "
f"found tensor of shape: {mask.size()}"
)
if mask is not None:
# mask out early to zero out preds/labels to count
predictions = predictions * mask
gold_labels = gold_labels * mask
# Don't care about batch anymore since its micro-averaged
predictions = predictions.view(-1)
gold_labels = gold_labels.view(-1)
# Find when they are equal, then only count places where the
# model actually made a prediction
# If (first is result, second is contrib to denom):
# - gold is zero and pred is zero -> zero, no contrib
# - gold is one and pred is zero -> zero, no contrib
# - gold is zero and pred is one -> zero, contrib
# - gold is one and pred is one -> one, contrib
precision = predictions.eq(gold_labels).long() * predictions
self._precision_correct_count += precision.sum().item()
self._precision_total_count += predictions.sum().item()
# Find where they are equal, then only count places where the
# gold label was true
recall = predictions.eq(gold_labels).long() * gold_labels
self._recall_correct_count += recall.sum().item()
self._recall_total_count += gold_labels.sum().item()
def get_metric(self, reset: bool = False):
"""
Returns
-------
The accumulated accuracy.
"""
precision = self._precision_correct_count / max(
self._precision_total_count, 1.0
)
recall = self._recall_correct_count / max(self._recall_total_count, 1.0)
        # Tiny epsilon guards against division by zero when precision = recall = 0
        f1 = 2 * (precision * recall) / max(precision + recall, 1e-12)
if reset:
self.reset()
return {
"precision": precision,
"recall": recall,
"f1": f1,
}
@overrides
def reset(self):
self._precision_correct_count = 0.0
self._precision_total_count = 0.0
self._recall_correct_count = 0.0
self._recall_total_count = 0.0
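# Minimal sketch exercising both metrics on toy tensors, following the
# (batch, turn, fact) shape convention documented above.
def example_metrics():
    mrr = MeanReciprocalRank()
    logits = torch.tensor([[[0.1, 0.9, 0.2], [0.8, 0.1, 0.3]]])
    labels = torch.tensor([[[0, 1, 0], [0, 0, 1]]])
    mrr(logits, labels, mask=None)
    # Relevant fact ranks 1st in turn 0 (RR=1) and 2nd in turn 1 (RR=0.5)
    print(mrr.get_metric())  # 0.75
    f1 = MultilabelMicroF1()
    preds = torch.tensor([[1, 0, 1]])
    gold = torch.tensor([[1, 1, 0]])
    f1(preds, gold)
    print(f1.get_metric())  # {'precision': 0.5, 'recall': 0.5, 'f1': 0.5}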
|
curiosity-main
|
curiosity/metrics.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
"""
A database containing all the wikipedia/entity linking information.
"""
import random
import re
import subprocess
import os
from contextlib import contextmanager
from collections import defaultdict
from typing import List, Tuple, Dict, NamedTuple
from sqlalchemy import Boolean, Integer, ForeignKey, Column, Text, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import (
Load,
sessionmaker,
scoped_session,
relationship,
selectinload,
)
from sqlalchemy.orm.scoping import ScopedSession
def md5sum(filename: str) -> str:
return (
subprocess.run(
f"md5sum {filename}", shell=True, stdout=subprocess.PIPE, check=True
)
.stdout.decode("utf-8")
.split()[0]
)
def verify_checksum(checksum: str, filename: str) -> None:
if os.path.exists(filename):
file_checksum = md5sum(filename)
if checksum != file_checksum:
raise ValueError(f"Incorrect checksum for: {filename}")
else:
raise ValueError(f"File does not exist: {filename}")
REF_RE = r"< ref >.*?< \/ ref >"
def clean_text(text: str) -> str:
"""
Sometimes the wiki text has formatting that I missed, like refs.
To avoid reparsing/relinking text, this is a patch fix to do it JIT
"""
return re.sub(REF_RE, "", text)
Base = declarative_base()
def create_sql(sql_path: str):
engine = create_engine(f"sqlite:///{sql_path}")
Base.metadata.bind = engine
factory = sessionmaker(bind=engine)
session_cls = scoped_session(factory)
return engine, session_cls()
class EntityLink(NamedTuple):
"""
This represents a single fact in the frontend
"""
page_entity: str
mention_entity: str
section_title: str
pageviews: int
context: str
is_location: bool
# Having database ids is helpful for backlinking and uniqueness
fact_id: int
mention_id: int
class Curriculum(NamedTuple):
views: int
entities: List[Tuple[str, int]]
class Fact(Base):
__tablename__ = "fact"
id = Column(Integer, primary_key=True)
page = Column(Text(), nullable=False, index=True)
section_idx = Column(Integer, nullable=False)
section_title = Column(Text(), nullable=False, index=True)
paragraph_idx = Column(Integer, nullable=False)
text = Column(Text(), nullable=False)
pageviews = Column(Integer, nullable=False)
mentions = relationship("Mention")
class Mention(Base):
__tablename__ = "mention"
id = Column(Integer, primary_key=True)
is_location = Column(Boolean, nullable=False)
pageviews = Column(Integer, nullable=False)
    # Duplicate of Fact.page for speed; these never diverge, so it's safe
page = Column(Text(), nullable=False)
title = Column(Text(), nullable=False)
fact_id = Column(Integer, ForeignKey("fact.id"), nullable=False)
fact = relationship("Fact", back_populates="mentions")
class WikiSummary(Base):
__tablename__ = "wiki"
id = Column(Integer, primary_key=True)
title = Column(Text(), nullable=False, index=True)
text = Column(Text(), nullable=False)
is_simple = Column(Boolean, nullable=False)
class CuriosityStore:
"""
Convenience class for reading all data
"""
def __init__(self, sql_path) -> None:
self._engine = create_engine(f"sqlite:///{sql_path}")
Base.metadata.bind = self._engine
self._pages: List[str] = self._cache_pages()
@property
@contextmanager
def _session_scope(self) -> ScopedSession:
session = scoped_session(sessionmaker(bind=self._engine))
try:
yield session
session.commit()
except Exception:
session.rollback()
raise
finally:
session.close()
def _cache_pages(self) -> List[str]:
with self._session_scope as session:
rows = session.query(Fact.page).distinct().all()
return [r[0] for r in rows]
def get_fact_lookup(self):
with self._session_scope as session:
rows = session.query(Fact).all()
return {f.id: f.text for f in rows}
def get_focus_entities(self):
"""
Return all possible focus entities
"""
return self._pages
def random_entity(self) -> str:
if "CURIOSITY_ENTITY" in os.environ:
entity = os.environ["CURIOSITY_ENTITY"]
if entity in self._pages:
return entity
return random.choice(self._pages)
def random_sections(self, page_entity: str, n: int) -> List[str]:
with self._session_scope as session:
rows = (
session.query(Fact)
.filter_by(page=page_entity)
.filter(Fact.section_title != "Body")
.group_by(Fact.section_title)
.all()
)
section_names = [r.section_title for r in rows]
if n > len(section_names):
raise ValueError(f"Not enough sections: {len(section_names)} vs {n}")
return random.sample(section_names, n)
def get_links(self, page_entity: str) -> List[EntityLink]:
"""
For the given page_entity, return all entity links on the page
"""
with self._session_scope as session:
rows = (
session.query(Fact)
.filter_by(page=page_entity)
.options(selectinload(Fact.mentions))
)
links = []
for fact in rows:
for mention in fact.mentions:
links.append(
EntityLink(
fact.page,
mention.title,
fact.section_title,
mention.pageviews,
fact.text,
mention.is_location,
fact.id,
mention.id,
)
)
return links
def get_facts(self, page_entity: str, known_entity: str) -> List[EntityLink]:
"""
Find all facts on focus_entity's page that match known_entity
"""
with self._session_scope as session:
rows = (
session.query(Mention)
.filter(Mention.page == page_entity)
.filter(Mention.title == known_entity)
.group_by(Mention.fact_id)
.all()
)
return [
EntityLink(
m.fact.page,
m.title,
m.fact.section_title,
m.pageviews,
clean_text(m.fact.text),
m.is_location,
m.fact_id,
m.id,
)
for m in rows
]
def get_sections(self, page_entity: str) -> List[str]:
"""
Get all the valid sections for this page
"""
with self._session_scope as session:
rows = (
session.query(Fact)
.filter_by(page=page_entity)
.group_by(Fact.section_title)
.options(Load(Fact).load_only("page", "section_title"))
.all()
)
return [r.section_title for r in rows]
def get_page_facts(self, page_entity: str) -> List[EntityLink]:
"""
For the given page_entity, return unique facts
"""
with self._session_scope as session:
rows = (
session.query(Fact)
.filter_by(page=page_entity)
.options(selectinload(Fact.mentions))
)
links = []
for fact in rows:
links.append(
EntityLink(
fact.page,
fact.mentions[0].title,
fact.section_title,
fact.mentions[0].pageviews,
fact.text,
fact.mentions[0].is_location,
fact.id,
fact.mentions[0].id,
)
)
return links
def get_section_facts(self, page_entity: str, section: str) -> List[EntityLink]:
with self._session_scope as session:
rows = (
session.query(Fact)
.filter_by(page=page_entity, section_title=section)
.options(selectinload(Fact.mentions))
.all()
)
return [
                # Use the first mention for now; it's not too important, but could be
# improved to random later
EntityLink(
f.page,
f.mentions[0].title,
f.section_title,
f.mentions[0].pageviews,
clean_text(f.text),
f.mentions[0].is_location,
f.id,
f.mentions[0].id,
)
for f in rows
]
def get_entity_summary(self, page_entity: str) -> str:
with self._session_scope as session:
return (
session.query(WikiSummary)
.filter_by(title=page_entity)
.first()
.text.strip()
)
def prominence_curriculum(self) -> Dict[str, Curriculum]:
"""
For each page, return a curriculum.
"""
with self._session_scope as session:
page_mentions = (
session.query(Fact)
.options(
selectinload(Fact.mentions),
Load(Fact).load_only("page", "pageviews"),
Load(Mention).load_only("pageviews", "title"),
)
.all()
)
curr = defaultdict(lambda: {"views": 0, "entities": set()})
for fact in page_mentions:
curr[fact.page]["views"] = fact.pageviews
current_mentions = curr[fact.page]["entities"]
for m in fact.mentions:
current_mentions.add((m.title, m.pageviews))
return curr
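# Usage sketch for CuriosityStore; the sqlite filename is a placeholder
# assumption, substitute the real wiki database path.
def example_store():
    store = CuriosityStore("wiki_sql.sqlite.db")  # hypothetical path
    entity = store.random_entity()
    print(entity)
    print(store.get_sections(entity))
    print(len(store.get_page_facts(entity)))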
|
curiosity-main
|
curiosity/db.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import numpy as np
from overrides import overrides
from allennlp.common.util import JsonDict
from allennlp.data import Instance
from allennlp.predictors.predictor import Predictor
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.models import Model
from curiosity.db import verify_checksum, create_sql, Fact
@Predictor.register('curiosity_predictor')
class CuriosityPredictor(Predictor):
@overrides
def __init__(self, model: Model, dataset_reader: DatasetReader, frozen: bool = True) -> None:
if frozen:
model.eval()
self._model = model
self._dataset_reader = dataset_reader
self.cuda_device = next(self._model.named_parameters())[1].get_device()
# Hard coded fact loading
db_path = os.path.join('dialog_data', 'wiki_sql.sqlite.db')
engine, session = create_sql(db_path)
facts = (
session
.query(Fact)
.all()
)
self._dataset_reader._fact_lookup = {f.id: f for f in facts}
@overrides
def predict_json(self, inputs: JsonDict) -> JsonDict:
dialogs = inputs['dialogs']
out = []
for i, d in enumerate(dialogs):
if i == 30:
# Early termination to save time
break
instance = self._dataset_reader.text_to_instance(d)
prediction = self.predict_instance(instance)
# Label predictions for this dialog
label_prediction = {
'dialog_id': d['dialog_id']
}
for k, v in prediction.items():
if k != 'loss':
label_prediction[k] = np.argmax(v, axis=1).tolist()
out.append(label_prediction)
return out
|
curiosity-main
|
curiosity/predictors.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Dict, Optional, Union
import math
import torch
from torch import nn
from allennlp.nn.util import (
get_text_field_mask,
sequence_cross_entropy_with_logits,
masked_mean,
)
from allennlp.data.vocabulary import Vocabulary
from allennlp.models import Model
from allennlp.modules.text_field_embedders import TextFieldEmbedder
from allennlp.modules.seq2seq_encoders import Seq2SeqEncoder
from allennlp.modules.seq2vec_encoders import Seq2VecEncoder
from allennlp.modules.time_distributed import TimeDistributed
from allennlp.training.metrics import CategoricalAccuracy, Average
from curiosity.nn import FactRanker
from curiosity.metrics import MeanReciprocalRank, MultilabelMicroF1
from curiosity.reader import DIALOG_ACT_LABELS
from curiosity.bert import BertEncoder
def gelu(x):
return 0.5 * x * (1.0 + torch.erf(x / math.sqrt(2.0)))
class GeLU(nn.Module):
__constants__ = ["inplace"]
def __init__(self, inplace=False):
super(GeLU, self).__init__()
self.inplace = inplace
def forward(self, input_):
return gelu(input_)
class Clamp(nn.Module):
def __init__(self, should_clamp: bool = False):
super(Clamp, self).__init__()
self._should_clamp = should_clamp
def forward(self, input_):
if self._should_clamp:
return 0.0 * input_
else:
return input_
@Model.register("curiosity_model")
class CuriosityModel(Model):
def __init__(
self,
vocab: Vocabulary,
use_glove: bool,
use_bert: bool,
bert_trainable: bool,
bert_name: str,
mention_embedder: TextFieldEmbedder,
dialog_context: Seq2SeqEncoder,
fact_ranker: FactRanker,
dropout_prob: float,
sender_emb_size: int,
act_emb_size: int,
fact_loss_weight: float,
fact_pos_weight: float,
utter_embedder: TextFieldEmbedder = None,
utter_context: Seq2VecEncoder = None,
disable_known_entities: bool = False,
disable_dialog_acts: bool = False,
disable_likes: bool = False,
disable_facts: bool = False,
):
super().__init__(vocab)
self._disable_known_entities = disable_known_entities
self._disable_dialog_acts = disable_dialog_acts
self._clamp_dialog_acts = Clamp(should_clamp=disable_dialog_acts)
self._disable_likes = disable_likes
self._clamp_likes = Clamp(should_clamp=disable_likes)
self._disable_facts = disable_facts
self._clamp_facts = Clamp(should_clamp=disable_facts)
self._fact_loss_weight = fact_loss_weight
self._fact_pos_weight = fact_pos_weight
self._sender_emb_size = sender_emb_size
self._sender_emb = nn.Embedding(2, sender_emb_size)
# Easier to use a matrix as embeddings, given the input format
self._act_embedder = nn.Linear(
vocab.get_vocab_size(DIALOG_ACT_LABELS), act_emb_size, bias=False
)
self._mention_embedder = mention_embedder
        if int(use_glove) + int(use_bert) != 1:
            raise ValueError("Exactly one of use_glove or use_bert must be set")
self._use_glove = use_glove
self._use_bert = use_bert
self._bert_trainable = bert_trainable
self._bert_name = bert_name
self._utter_embedder = utter_embedder
self._utter_context = utter_context
# Bert encoder is embedder + context
if use_bert:
# Not trainable for now
self._bert_encoder = BertEncoder(
self._bert_name, requires_grad=bert_trainable
)
self._dist_utter_context = None
self._utter_dim = self._bert_encoder.get_output_dim()
else:
self._bert_encoder = None
self._dist_utter_context = TimeDistributed(self._utter_context)
self._utter_dim = self._utter_context.get_output_dim()
self._dialog_context = dialog_context
self._fact_ranker = fact_ranker
# Easier to code as cross entropy with two classes
# Likes are per message, for only assistant messages
self._like_classifier = nn.Linear(self._dialog_context.get_output_dim(), 2)
self._like_accuracy = CategoricalAccuracy()
self._like_loss_metric = Average()
# Transform the word_dim entity reps to hidden_dim
self._focus_net = nn.Sequential(
nn.Linear(
self._mention_embedder.get_output_dim(),
self._dialog_context.get_output_dim(),
),
GeLU(),
)
self._known_net = nn.Sequential(
nn.Linear(
self._mention_embedder.get_output_dim(),
self._dialog_context.get_output_dim(),
),
GeLU(),
Clamp(should_clamp=disable_known_entities),
)
# If we don't use known, then disable gradient to it
self._known_net.requires_grad = not disable_known_entities
# Dialog acts are per message, for all messages
# This network predicts the dialog act of the current message
# for both student and teacher
self._da_classifier = nn.Sequential(
nn.Linear(
self._utter_dim + self._dialog_context.get_output_dim(),
self._dialog_context.get_output_dim(),
),
GeLU(),
nn.Linear(
self._dialog_context.get_output_dim(),
vocab.get_vocab_size(DIALOG_ACT_LABELS),
),
)
self._da_bce_loss = torch.nn.BCEWithLogitsLoss(reduction="none")
self._da_f1_metric = MultilabelMicroF1()
self._da_loss_metric = Average()
# This network predicts what the next action should be
# It predicts for user and assistant since there isn't a real
# reason to restrict that
self._policy_classifier = nn.Sequential(
nn.Linear(
self._dialog_context.get_output_dim(),
self._dialog_context.get_output_dim(),
),
GeLU(),
nn.Linear(
self._dialog_context.get_output_dim(),
vocab.get_vocab_size(DIALOG_ACT_LABELS),
),
)
self._policy_bce_loss = torch.nn.BCEWithLogitsLoss(reduction="none")
self._policy_f1_metric = MultilabelMicroF1()
self._policy_loss_metric = Average()
self._fact_mrr = MeanReciprocalRank()
self._fact_loss_metric = Average()
self._dropout_prob = dropout_prob
self._dropout = nn.Dropout(dropout_prob)
        # Fact use is much less prevalent, about 9 times less so, so factor that in
self._fact_bce_loss = torch.nn.BCEWithLogitsLoss(
reduction="none", pos_weight=torch.Tensor([self._fact_pos_weight])
)
def get_metrics(self, reset: bool = False):
da_metrics = self._da_f1_metric.get_metric(reset=reset)
policy_metrics = self._policy_f1_metric.get_metric(reset=reset)
metrics_to_report = {
"like_accuracy": self._like_accuracy.get_metric(reset=reset),
"like_loss": self._like_loss_metric.get_metric(reset=reset),
"fact_mrr": self._fact_mrr.get_metric(reset=reset),
"fact_loss": self._fact_loss_metric.get_metric(reset=reset),
"da_loss": self._da_loss_metric.get_metric(reset=reset),
"da_micro_f1": da_metrics["f1"],
"da_micro_precision": da_metrics["precision"],
"da_micro_recall": da_metrics["recall"],
"policy_micro_f1": policy_metrics["f1"],
"policy_micro_precision": policy_metrics["precision"],
"policy_micro_recall": policy_metrics["recall"],
}
metrics_to_report["total"] = (
metrics_to_report["fact_mrr"]
+ metrics_to_report["policy_micro_f1"]
+ metrics_to_report["da_micro_f1"]
+ metrics_to_report["like_accuracy"]
)
return metrics_to_report
def forward(
self,
messages: Dict[str, torch.Tensor],
# (batch_size, n_turns, n_facts, n_words)
facts: Dict[str, torch.Tensor],
# (batch_size, n_turns)
senders: torch.Tensor,
# (batch_size, n_turns, n_acts)
dialog_acts: torch.Tensor,
# (batch_size, n_turns)
dialog_acts_mask: torch.Tensor,
# (batch_size, n_entities)
known_entities: Dict[str, torch.Tensor],
# (batch_size, 1)
focus_entity: Dict[str, torch.Tensor],
# (batch_size, n_turns, n_facts)
fact_labels: Optional[torch.Tensor] = None,
# (batch_size, n_turns, 2)
likes: Optional[torch.Tensor] = None,
metadata: Optional[Dict] = None,
):
output = {}
# Take care of the easy stuff first
# (batch_size, n_entities)
known_entities_mask = get_text_field_mask(known_entities)
# (batch_size, n_turns, sender_emb_size)
sender_emb = self._sender_emb(senders)
known_emb = self._mention_embedder(known_entities)
# TODO: This could instead of averaged, be attended
known_vec = self._known_net(
masked_mean(known_emb, known_entities_mask.unsqueeze(-1), dim=1)
)
# There is always exactly one entity
focus_emb = self._focus_net(self._mention_embedder(focus_entity)[:, 0, :])
if self._use_bert:
# (batch_size, n_turns, n_words, emb_dim)
context, utter_mask = self._bert_encoder(messages)
context = self._dropout(context)
else:
# (batch_size, n_turns)
# This is the mask since not all dialogs have same number
# of turns
utter_mask = get_text_field_mask(messages)
# (batch_size, n_turns, n_words)
# Mask since not all utterances have same number of words
# Wrapping dim skips over n_messages dim
text_mask = get_text_field_mask(messages, num_wrapping_dims=1)
# (batch_size, n_turns, n_words, emb_dim)
embed = self._dropout(self._utter_embedder(messages))
# (batch_size, n_turns, hidden_dim)
context = self._dist_utter_context(embed, text_mask)
# (batch_size, n_turns, act_emb_size)
act_emb = self._act_embedder(dialog_acts.float())
act_emb = self._clamp_dialog_acts(act_emb)
# (batch_size, n_turns, hidden_dim + known_dim + focus_dim + sender_dim + act_dim)
n_turns = context.shape[1]
full_context = torch.cat(
(
context,
sender_emb,
act_emb,
focus_emb[:, None, :].repeat_interleave(n_turns, 1),
known_vec[:, None, :].repeat_interleave(n_turns, 1),
),
dim=-1,
)
# (batch_size, n_turns, hidden_dim)
# This assumes dialog_context does not peek into future
dialog_context = self._dialog_context(full_context, utter_mask)
# shift context one right, pad with zeros at front
# This makes it so that utter_t is paired with context_t-1
# which is what we want
# This is useful in a few different places, so compute it here once
shape = dialog_context.shape
shifted_context = torch.cat(
(
dialog_context.new_zeros([shape[0], 1, shape[2]]),
dialog_context[:, :-1, :],
),
dim=1,
)
has_loss = False
if self._disable_dialog_acts:
da_loss = 0
policy_loss = 0
else:
# Dialog act per utter loss
has_loss = True
da_loss = self._compute_da_loss(
output,
context,
shifted_context,
utter_mask,
dialog_acts,
dialog_acts_mask,
)
# Policy loss
policy_loss = self._compute_policy_loss(
output, shifted_context, utter_mask, dialog_acts, dialog_acts_mask
)
if self._disable_facts:
# If facts are disabled, don't output anything related
# to them
fact_loss = 0
else:
if self._use_bert:
# (batch_size, n_turns, n_words, emb_dim)
                fact_repr, fact_mask = self._bert_encoder(facts)
                fact_repr = self._dropout(fact_repr)
                # Mask out user turns (even indices); facts attach to assistant turns
                fact_mask[:, ::2] = 0
else:
# (batch_size, n_turns, n_facts)
# Wrapping dim skips over n_messages
fact_mask = get_text_field_mask(facts, num_wrapping_dims=1)
# In addition to masking padded facts, also explicitly mask
# user turns just in case
fact_mask[:, ::2] = 0
# (batch_size, n_turns, n_facts, n_words)
# Wrapping dim skips over n_turns and n_facts
fact_text_mask = get_text_field_mask(facts, num_wrapping_dims=2)
# (batch_size, n_turns, n_facts, n_words, emb_dim)
                # Share the encoder with the utterance encoder; flatten the
                # turn/fact dims first so the Seq2Vec encoder gets 3D input
fact_embed = self._dropout(self._utter_embedder(facts))
shape = fact_embed.shape
word_dim = shape[-2]
emb_dim = shape[-1]
reshaped_facts = fact_embed.view(-1, word_dim, emb_dim)
reshaped_fact_text_mask = fact_text_mask.view(-1, word_dim)
reshaped_fact_repr = self._utter_context(
reshaped_facts, reshaped_fact_text_mask
)
# No more emb dimension or word/seq dim
fact_repr = reshaped_fact_repr.view(shape[:-2] + (-1,))
fact_logits = self._fact_ranker(
shifted_context,
fact_repr,
)
output["fact_logits"] = fact_logits
if fact_labels is not None:
has_loss = True
fact_loss = self._compute_fact_loss(fact_logits, fact_labels, fact_mask)
self._fact_loss_metric(fact_loss.item())
self._fact_mrr(fact_logits, fact_labels, mask=fact_mask)
else:
fact_loss = 0
if self._disable_likes:
like_loss = 0
else:
has_loss = True
# (batch_size, n_turns, 2)
like_logits = self._like_classifier(dialog_context)
output["like_logits"] = like_logits
# There are several masks here to get the loss/metrics correct
# - utter_mask: mask out positions that do not have an utterance
# - user_mask: mask out positions that have a user utterances
# since their turns are never liked
# Using new_ones() preserves the type of the tensor
user_mask = utter_mask.new_ones(utter_mask.shape)
# Since the user is always even, this masks out user positions
user_mask[:, ::2] = 0
final_mask = utter_mask * user_mask
            if likes is not None:
                masked_likes = likes * final_mask
has_loss = True
like_loss = sequence_cross_entropy_with_logits(
like_logits, masked_likes, final_mask
)
self._like_accuracy(like_logits, masked_likes, final_mask)
self._like_loss_metric(like_loss.item())
else:
like_loss = 0
if has_loss:
output["loss"] = (
self._fact_loss_weight * fact_loss + like_loss + da_loss + policy_loss
)
return output
def _compute_da_loss(
self,
output_dict,
messages: torch.Tensor,
shifted_context: torch.Tensor,
utter_mask: torch.Tensor,
dialog_acts: torch.Tensor,
dialog_acts_mask: torch.Tensor,
):
"""
Given utterance at turn t, get the context (utter + acts) from t-1,
the utter_t, and predict the act
"""
message_w_context = torch.cat((messages, shifted_context), dim=-1)
# (batch_size, n_turns, n_dialog_acts)
da_logits = self._da_classifier(message_w_context)
output_dict["da_logits"] = da_logits
da_unreduced_loss = self._da_bce_loss(da_logits, dialog_acts.float())
# Note: the last dimension is expanded from singleton to n_dialog_acts
# Since the mask is at turn level
# (batch_size, n_turns, n_dialog_acts)
da_combined_mask = (
dialog_acts_mask.float().unsqueeze(-1) * utter_mask.float().unsqueeze(-1)
).expand_as(da_unreduced_loss)
da_unreduced_loss = da_combined_mask * da_unreduced_loss
# Mean loss over non-masked inputs, avoid division by zero
da_loss = da_unreduced_loss.sum() / da_combined_mask.sum().clamp(min=1)
da_loss_item = da_loss.item()
self._da_loss_metric(da_loss_item)
# (batch_size, n_turns, n_dialog_acts)
da_preds = (torch.sigmoid(da_logits) > 0.5).long()
self._da_f1_metric(da_preds, dialog_acts, da_combined_mask.long())
return da_loss
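    # Masked-mean pattern (editor's note): multiplying the unreduced BCE by the
    # combined mask zeroes out padded turns, and dividing by
    # mask.sum().clamp(min=1) averages over real (turn, act) cells only while
    # guarding against an all-masked batch.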
def _compute_policy_loss(
self,
output_dict,
shifted_context: torch.Tensor,
utter_mask: torch.Tensor,
dialog_acts: torch.Tensor,
dialog_acts_mask: torch.Tensor,
):
"""
        Given the dialog context (utter + acts) through turn t-1, predict
        the act of turn t without conditioning on utter_t
"""
# (batch_size, n_turns, n_dialog_acts)
policy_logits = self._policy_classifier(shifted_context)
output_dict["policy_logits"] = policy_logits
policy_unreduced_loss = self._policy_bce_loss(
policy_logits, dialog_acts.float()
)
# Note: the last dimension is expanded from singleton to n_dialog_acts
# Since the mask is at turn level
# (batch_size, n_turns, n_dialog_acts)
policy_combined_mask = (
dialog_acts_mask.float().unsqueeze(-1) * utter_mask.float().unsqueeze(-1)
).expand_as(policy_unreduced_loss)
policy_unreduced_loss = policy_combined_mask * policy_unreduced_loss
# Mean loss over non-masked inputs, avoid division by zero
policy_loss = policy_unreduced_loss.sum() / policy_combined_mask.sum().clamp(
min=1
)
policy_loss_item = policy_loss.item()
self._policy_loss_metric(policy_loss_item)
# (batch_size, n_turns, n_dialog_acts)
policy_preds = (torch.sigmoid(policy_logits) > 0.5).long()
self._policy_f1_metric(policy_preds, dialog_acts, policy_combined_mask.long())
return policy_loss
    def _compute_fact_loss(
        self, logits: torch.Tensor, fact_labels: torch.Tensor, fact_mask: torch.Tensor
    ):
        # Don't reduce, so that padded facts can be masked out
        unreduced_loss = self._fact_bce_loss(logits, fact_labels)
        total_loss = (unreduced_loss * fact_mask.float()).sum()
        # Avoid division by zero when every fact is masked
        mean_loss = total_loss / fact_mask.float().sum().clamp(min=1)
        return mean_loss
|
curiosity-main
|
curiosity/models.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Implement very simple similarity search
"""
from typing import List, Optional
import pickle
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from curiosity.db import Fact, create_sql
class Similarity:
def __init__(self, wiki_sql_path: Optional[str] = None) -> None:
self._wiki_sql_path = wiki_sql_path
self._vectorizer = TfidfVectorizer(
stop_words="english",
ngram_range=(1, 2),
strip_accents="unicode",
decode_error="ignore",
)
def train(self) -> None:
if self._wiki_sql_path is None:
raise ValueError("Cannot fit tfidf with wiki_sql_path unset")
_, session = create_sql(self._wiki_sql_path)
docs = [r[0] for r in session.query(Fact.text).all()]
self._vectorizer.fit(docs)
def save(self, tfidf_path: str) -> None:
with open(tfidf_path, "wb") as f:
pickle.dump(self._vectorizer, f)
def load(self, tfidf_path: str) -> None:
with open(tfidf_path, "rb") as f:
self._vectorizer = pickle.load(f)
def score(self, query: str, docs: List[str]) -> List[float]:
query_vector = self._vectorizer.transform([query])
doc_vectors = self._vectorizer.transform(docs)
return cosine_similarity(query_vector, doc_vectors)[0].tolist()
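# Example usage (editor's sketch, not part of the original module; the paths
# below are hypothetical placeholders):
if __name__ == "__main__":
    sim = Similarity(wiki_sql_path="dialog_data/wiki_sql.sqlite.db")
    sim.train()
    sim.save("models/fact_tfidf.pkl")
    sim.load("models/fact_tfidf.pkl")
    print(sim.score("st. louis history", ["St. Louis was founded in 1764."]))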
|
curiosity-main
|
curiosity/similarity.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
def get_logger(name):
log = logging.getLogger(name)
if len(log.handlers) < 2:
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
fh = logging.FileHandler("curio-model.log")
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
sh = logging.StreamHandler()
sh.setLevel(logging.INFO)
sh.setFormatter(formatter)
log.addHandler(fh)
log.addHandler(sh)
log.setLevel(logging.INFO)
return log
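# Usage (editor's note): modules in this repo obtain a shared logger with
#     log = get_logger(__name__)
#     log.info("message")
# which writes to both stdout and curio-model.log; the handler-count guard
# above avoids attaching duplicate handlers on repeated calls.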
|
curiosity-main
|
curiosity/util.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
from overrides import overrides
from allennlp.common.registrable import Registrable
from allennlp.modules.similarity_functions.similarity_function import SimilarityFunction
class FactRanker(torch.nn.Module, Registrable):
    @overrides
    def forward(
        self, dialog_context: torch.Tensor, fact_repr: torch.Tensor
    ) -> torch.Tensor:
        """
        Accept the shifted dialog context and per-fact representations and
        produce one ranking logit per fact
        """
        raise NotImplementedError
@FactRanker.register("mean_logit_ranker")
class MeanLogitRanker(FactRanker):
def __init__(self, similarity_function: SimilarityFunction):
super().__init__()
self._similarity_function = similarity_function
@overrides
def forward(
self,
# TODO: pass in the prior message
# (batch_size, n_turns, dc_dim)
shifted_dialog_context: torch.Tensor,
# One representation vector per fact
# (batch_size, n_turns, n_facts, repr_dim)
fact_repr: torch.Tensor,
    ) -> torch.Tensor:
# Make the dims work
shifted_dc_unsqueeze = shifted_dialog_context.unsqueeze(2)
logits = self._similarity_function(shifted_dc_unsqueeze, fact_repr)
return logits
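# Shape walkthrough (editor's sketch): unsqueeze(2) turns the context into
# (batch_size, n_turns, 1, dc_dim), which broadcasts against fact_repr of
# shape (batch_size, n_turns, n_facts, repr_dim) inside the similarity
# function, yielding one logit per fact: (batch_size, n_turns, n_facts).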
|
curiosity-main
|
curiosity/nn.py
|
curiosity-main
|
curiosity/__init__.py
|
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Dict, Optional, Union
import math
import torch
from torch import nn
from allennlp.nn.util import (
get_text_field_mask, sequence_cross_entropy_with_logits,
masked_mean
)
from allennlp.data.vocabulary import Vocabulary
from allennlp.models import Model
from allennlp.modules.text_field_embedders import TextFieldEmbedder
from allennlp.modules.seq2seq_encoders import Seq2SeqEncoder
from allennlp.modules.feedforward import FeedForward
from allennlp.modules.seq2vec_encoders import Seq2VecEncoder
from allennlp.modules.time_distributed import TimeDistributed
from allennlp.training.metrics import CategoricalAccuracy, Average
from curiosity.nn import FactRanker
from curiosity.metrics import MeanReciprocalRank, MultilabelMicroF1
from curiosity.reader import DIALOG_ACT_LABELS
from curiosity.bert import BertEncoder
def gelu(x):
return 0.5 * x * (1.0 + torch.erf(x / math.sqrt(2.0)))
class GeLU(nn.Module):
__constants__ = ['inplace']
    def __init__(self, inplace=False):
        super(GeLU, self).__init__()
        # inplace is accepted for API parity with activations like nn.ReLU,
        # but the erf-based gelu above does not use it
        self.inplace = inplace
def forward(self, input_):
return gelu(input_)
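# Sanity check (editor's addition): the erf-based form above is the exact
# GeLU; on recent torch versions it agrees with the built-in, e.g.
#     x = torch.linspace(-3, 3, 7)
#     torch.allclose(gelu(x), torch.nn.functional.gelu(x))  # True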
class Clamp(nn.Module):
def __init__(self, should_clamp: bool = False):
super(Clamp, self).__init__()
self._should_clamp = should_clamp
def forward(self, input_):
if self._should_clamp:
return 0.0 * input_
else:
return input_
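# Usage note (editor's addition): Clamp implements the ablations below by
# zeroing its input when should_clamp=True, so downstream components receive
# a constant zero signal, e.g. Clamp(should_clamp=True)(torch.ones(2)) gives
# tensor([0., 0.]), while Clamp()(x) is the identity.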
@Model.register('baseline_curiosity_model')
class CuriosityBaselineModel(Model):
def __init__(self,
vocab: Vocabulary,
use_glove: bool,
use_bert: bool,
bert_trainable: bool,
bert_name: str,
mention_embedder: TextFieldEmbedder,
dialog_context: FeedForward,
fact_ranker: FactRanker,
dropout_prob: float,
sender_emb_size: int,
act_emb_size: int,
fact_loss_weight: float,
fact_pos_weight: float,
utter_embedder: TextFieldEmbedder = None,
utter_context: Seq2VecEncoder = None,
disable_known_entities: bool = False,
disable_dialog_acts: bool = False,
disable_likes: bool = False,
disable_facts: bool = False):
super().__init__(vocab)
self._disable_known_entities = disable_known_entities
self._disable_dialog_acts = disable_dialog_acts
self._clamp_dialog_acts = Clamp(should_clamp=disable_dialog_acts)
self._disable_likes = disable_likes
self._clamp_likes = Clamp(should_clamp=disable_likes)
self._disable_facts = disable_facts
self._clamp_facts = Clamp(should_clamp=disable_facts)
self._fact_loss_weight = fact_loss_weight
self._fact_pos_weight = fact_pos_weight
        if int(use_glove) + int(use_bert) != 1:
            raise ValueError('Exactly one of use_glove or use_bert must be set')
self._use_glove = use_glove
self._use_bert = use_bert
self._bert_trainable = bert_trainable
self._bert_name = bert_name
self._utter_embedder = utter_embedder
self._utter_context = utter_context
# Bert encoder is embedder + context
if use_bert:
# Not trainable for now
print('Using BERT encoder ...')
self._bert_encoder = BertEncoder(
self._bert_name, requires_grad=bert_trainable
)
self._dist_utter_context = None
self._utter_dim = self._bert_encoder.get_output_dim()
else:
print('Using LSTM encoder ...')
self._bert_encoder = None
self._dist_utter_context = TimeDistributed(self._utter_context)
self._utter_dim = self._utter_context.get_output_dim()
self._dialog_context = dialog_context
self._fact_ranker = fact_ranker
# Easier to code as cross entropy with two classes
# Likes are per message, for only assistant messages
self._like_classifier = nn.Linear(
self._dialog_context.get_output_dim(), 2
)
self._like_accuracy = CategoricalAccuracy()
self._like_loss_metric = Average()
# Dialog acts are per message, for all messages
# This network predicts the dialog act of the current message
# for both student and teacher
self._da_classifier = nn.Sequential(
nn.Linear(
self._utter_dim + self._dialog_context.get_output_dim(),
self._dialog_context.get_output_dim()
),
GeLU(),
nn.Linear(
self._dialog_context.get_output_dim(),
vocab.get_vocab_size(DIALOG_ACT_LABELS)
)
)
self._da_bce_loss = torch.nn.BCEWithLogitsLoss(reduction='none')
self._da_f1_metric = MultilabelMicroF1()
self._da_loss_metric = Average()
# This network predicts what the next action should be
# It predicts for user and assistant since there isn't a real
# reason to restrict that
self._policy_classifier = nn.Sequential(
nn.Linear(
self._dialog_context.get_output_dim(),
self._dialog_context.get_output_dim()
),
GeLU(),
nn.Linear(
self._dialog_context.get_output_dim(),
vocab.get_vocab_size(DIALOG_ACT_LABELS)
)
)
self._policy_bce_loss = torch.nn.BCEWithLogitsLoss(reduction='none')
self._policy_f1_metric = MultilabelMicroF1()
self._policy_loss_metric = Average()
self._fact_mrr = MeanReciprocalRank()
self._fact_loss_metric = Average()
self._dropout_prob = dropout_prob
self._dropout = nn.Dropout(dropout_prob)
        # Fact use is much less prevalent, about 9 times less so, so factor that in
self._fact_bce_loss = torch.nn.BCEWithLogitsLoss(
reduction='none',
pos_weight=torch.Tensor([self._fact_pos_weight])
)
def get_metrics(self, reset: bool = False):
da_metrics = self._da_f1_metric.get_metric(reset=reset)
policy_metrics = self._policy_f1_metric.get_metric(reset=reset)
metrics_to_report = {
'like_accuracy': self._like_accuracy.get_metric(reset=reset),
'like_loss': self._like_loss_metric.get_metric(reset=reset),
'fact_mrr': self._fact_mrr.get_metric(reset=reset),
'fact_loss': self._fact_loss_metric.get_metric(reset=reset),
'da_loss': self._da_loss_metric.get_metric(reset=reset),
'da_micro_f1': da_metrics['f1'],
'da_micro_precision': da_metrics['precision'],
'da_micro_recall': da_metrics['recall'],
'policy_micro_f1': policy_metrics['f1'],
'policy_micro_precision': policy_metrics['precision'],
'policy_micro_recall': policy_metrics['recall'],
}
metrics_to_report['total'] = \
metrics_to_report['fact_mrr'] + \
metrics_to_report['policy_micro_f1'] + \
metrics_to_report['da_micro_f1'] + \
metrics_to_report['like_accuracy']
return metrics_to_report
def forward(self,
messages: Dict[str, torch.Tensor],
# (batch_size, n_turns, n_facts, n_words)
facts: Dict[str, torch.Tensor],
# (batch_size, n_turns)
senders: torch.Tensor,
# (batch_size, n_turns, n_acts)
dialog_acts: torch.Tensor,
# (batch_size, n_turns)
dialog_acts_mask: torch.Tensor,
# (batch_size, n_entities)
known_entities: Dict[str, torch.Tensor],
# (batch_size, 1)
focus_entity: Dict[str, torch.Tensor],
# (batch_size, n_turns, n_facts)
fact_labels: Optional[torch.Tensor] = None,
# (batch_size, n_turns, 2)
likes: Optional[torch.Tensor] = None,
metadata: Optional[Dict] = None):
output = {}
# Take care of the easy stuff first
if self._use_bert:
            # (batch_size, n_turns, emb_dim)
context, utter_mask = self._bert_encoder(messages)
context = self._dropout(context)
else:
# (batch_size, n_turns)
# This is the mask since not all dialogs have same number
# of turns
utter_mask = get_text_field_mask(messages)
# (batch_size, n_turns, n_words)
# Mask since not all utterances have same number of words
# Wrapping dim skips over n_messages dim
text_mask = get_text_field_mask(messages, num_wrapping_dims=1)
# (batch_size, n_turns, n_words, emb_dim)
embed = self._dropout(self._utter_embedder(messages))
# (batch_size, n_turns, hidden_dim)
context = self._dist_utter_context(embed, text_mask)
        # (batch_size, n_turns, hidden_dim)
        dialog_context = self._dialog_context(context)
# shift context one right, pad with zeros at front
# This makes it so that utter_t is paired with context_t-1
# which is what we want
# This is useful in a few different places, so compute it here once
shape = dialog_context.shape
shifted_context = torch.cat((
dialog_context.new_zeros([shape[0], 1, shape[2]]),
dialog_context[:, :-1, :]
), dim=1)
has_loss = False
if self._disable_dialog_acts:
da_loss = 0
policy_loss = 0
else:
# Dialog act per utter loss
has_loss = True
da_loss = self._compute_da_loss(
output,
context, shifted_context, utter_mask,
dialog_acts, dialog_acts_mask
)
# Policy loss
policy_loss = self._compute_policy_loss(
output,
shifted_context, utter_mask,
dialog_acts, dialog_acts_mask
)
if self._disable_facts:
# If facts are disabled, don't output anything related
# to them
fact_loss = 0
else:
if self._use_bert:
                # (batch_size, n_turns, n_facts, emb_dim)
                fact_repr, fact_mask = self._bert_encoder(facts)
                fact_repr = self._dropout(fact_repr)
                # Explicitly mask out user turns, which never have facts
                fact_mask[:, ::2] = 0
else:
# (batch_size, n_turns, n_facts)
# Wrapping dim skips over n_messages
fact_mask = get_text_field_mask(facts, num_wrapping_dims=1)
# In addition to masking padded facts, also explicitly mask
# user turns just in case
fact_mask[:, ::2] = 0
# (batch_size, n_turns, n_facts, n_words)
# Wrapping dim skips over n_turns and n_facts
fact_text_mask = get_text_field_mask(facts, num_wrapping_dims=2)
# (batch_size, n_turns, n_facts, n_words, emb_dim)
# Share encoder with utter encoder
# Again, stupid dimensions
fact_embed = self._dropout(self._utter_embedder(facts))
shape = fact_embed.shape
word_dim = shape[-2]
emb_dim = shape[-1]
reshaped_facts = fact_embed.view(-1, word_dim, emb_dim)
reshaped_fact_text_mask = fact_text_mask.view(-1, word_dim)
reshaped_fact_repr = self._utter_context(
reshaped_facts, reshaped_fact_text_mask
)
# No more emb dimension or word/seq dim
fact_repr = reshaped_fact_repr.view(shape[:-2] + (-1,))
fact_logits = self._fact_ranker(
shifted_context,
fact_repr,
)
output['fact_logits'] = fact_logits
if fact_labels is not None:
has_loss = True
fact_loss = self._compute_fact_loss(
fact_logits, fact_labels, fact_mask
)
self._fact_loss_metric(fact_loss.item())
self._fact_mrr(fact_logits, fact_labels, mask=fact_mask)
else:
fact_loss = 0
        if self._disable_likes:
            like_loss = 0
        else:
            # (batch_size, n_turns, 2)
            like_logits = self._like_classifier(dialog_context)
            output['like_logits'] = like_logits
            # There are several masks here to get the loss/metrics correct
            # - utter_mask: mask out positions that do not have an utterance
            # - user_mask: mask out positions that have a user utterance
            #   since user turns are never liked
            # Using new_ones() preserves the type of the tensor
            user_mask = utter_mask.new_ones(utter_mask.shape)
            # Since user turns are always at even indices, this masks them out
            user_mask[:, ::2] = 0
            final_mask = utter_mask * user_mask
            if likes is not None:
                has_loss = True
                # Only build masked labels when likes exist; multiplying a
                # None label tensor would raise a TypeError
                masked_likes = likes * final_mask
                like_loss = sequence_cross_entropy_with_logits(
                    like_logits, masked_likes, final_mask
                )
                self._like_accuracy(like_logits, masked_likes, final_mask)
                self._like_loss_metric(like_loss.item())
            else:
                like_loss = 0
if has_loss:
output['loss'] = (
self._fact_loss_weight * fact_loss
+ like_loss
+ da_loss + policy_loss
)
return output
def _compute_da_loss(self,
output_dict,
messages: torch.Tensor,
shifted_context: torch.Tensor,
utter_mask: torch.Tensor,
dialog_acts: torch.Tensor,
dialog_acts_mask: torch.Tensor):
"""
Given utterance at turn t, get the context (utter + acts) from t-1,
the utter_t, and predict the act
"""
message_w_context = torch.cat((
messages, shifted_context
), dim=-1)
# (batch_size, n_turns, n_dialog_acts)
da_logits = self._da_classifier(message_w_context)
output_dict['da_logits'] = da_logits
da_unreduced_loss = self._da_bce_loss(da_logits, dialog_acts.float())
# Note: the last dimension is expanded from singleton to n_dialog_acts
# Since the mask is at turn level
# (batch_size, n_turns, n_dialog_acts)
da_combined_mask = (
dialog_acts_mask.float().unsqueeze(-1)
* utter_mask.float().unsqueeze(-1)
).expand_as(da_unreduced_loss)
da_unreduced_loss = da_combined_mask * da_unreduced_loss
# Mean loss over non-masked inputs, avoid division by zero
da_loss = da_unreduced_loss.sum() / da_combined_mask.sum().clamp(min=1)
da_loss_item = da_loss.item()
self._da_loss_metric(da_loss_item)
# (batch_size, n_turns, n_dialog_acts)
da_preds = (torch.sigmoid(da_logits) > .5).long()
self._da_f1_metric(da_preds, dialog_acts, da_combined_mask.long())
return da_loss
def _compute_policy_loss(self,
output_dict,
shifted_context: torch.Tensor,
utter_mask: torch.Tensor,
dialog_acts: torch.Tensor,
dialog_acts_mask: torch.Tensor):
"""
        Given the dialog context (utter + acts) through turn t-1, predict
        the act of turn t without conditioning on utter_t
"""
# (batch_size, n_turns, n_dialog_acts)
policy_logits = self._policy_classifier(shifted_context)
output_dict['policy_logits'] = policy_logits
policy_unreduced_loss = self._policy_bce_loss(policy_logits, dialog_acts.float())
# Note: the last dimension is expanded from singleton to n_dialog_acts
# Since the mask is at turn level
# (batch_size, n_turns, n_dialog_acts)
policy_combined_mask = (
dialog_acts_mask.float().unsqueeze(-1)
* utter_mask.float().unsqueeze(-1)
).expand_as(policy_unreduced_loss)
policy_unreduced_loss = policy_combined_mask * policy_unreduced_loss
# Mean loss over non-masked inputs, avoid division by zero
policy_loss = policy_unreduced_loss.sum() / policy_combined_mask.sum().clamp(min=1)
policy_loss_item = policy_loss.item()
self._policy_loss_metric(policy_loss_item)
# (batch_size, n_turns, n_dialog_acts)
policy_preds = (torch.sigmoid(policy_logits) > .5).long()
self._policy_f1_metric(policy_preds, dialog_acts, policy_combined_mask.long())
return policy_loss
    def _compute_fact_loss(self, logits: torch.Tensor,
                           fact_labels: torch.Tensor, fact_mask: torch.Tensor):
        # Don't reduce, so that padded facts can be masked out
        unreduced_loss = self._fact_bce_loss(logits, fact_labels)
        total_loss = (
            unreduced_loss * fact_mask.float()
        ).sum()
        # Avoid division by zero when every fact is masked
        mean_loss = total_loss / fact_mask.float().sum().clamp(min=1)
        return mean_loss
|
curiosity-main
|
curiosity/baseline_models.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Reader for curiosity dialog dataset. Below is a sample json with relevant structure
{
"dialogs": [
{
"messages": [
{
"message": "Hi, what do you know about St. Louis' history?",
"liked": false,
"sender": "user",
"facts": []
},
{
"message": "St. Louis had among worst air pollution in U.S.?",
"liked": true,
"sender": "assistant",
"facts": [
{
"fid": 54538,
"used": true
},
{
"fid": 54472,
"used": false
},
{
"fid": 54490,
"used": false
},
{
"fid": 54701,
"used": false
},
{
"fid": 54646,
"used": false
},
{
"fid": 54681,
"used": false
},
{
"fid": 54746,
"used": false
},
{
"fid": 54523,
"used": false
},
{
"fid": 54526,
"used": false
}
]
},
],
"known_entities": [
"Major League Baseball",
"United Kingdom",
"United States",
"United States Census Bureau",
"Missouri River"
],
"focus_entity": "St. Louis",
"dialog_id": 77,
"inferred_steps": false,
"created_time": 1568060716,
"aspects": [
"History",
"Education"
],
"first_aspect": "History",
"second_aspect": "Education",
"shuffle_facts": true,
"related_entities": [
"Auguste Chouteau",
"Spain",
"Susan Polgar",
"Darby, Pennsylvania",
"MacArthur Bridge (St. Louis)",
"Christ Church Cathedral, Oxford",
"Mound City, Illinois",
"Major League Baseball",
"United Kingdom",
"United States",
"Washington University in St. Louis",
"United States Census Bureau",
"Greater St. Louis",
"Missouri River"
]
}
]
}
"""
from typing import Dict, Optional, List
import json
import csv
import os
import numpy as np
from overrides import overrides
from allennlp.common.util import START_SYMBOL, END_SYMBOL
from allennlp.data.instance import Instance
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.fields import (
TextField,
ListField,
MetadataField,
LabelField,
ArrayField,
MultiLabelField,
)
from allennlp.data.tokenizers import Token, Tokenizer, WordTokenizer
from allennlp.data.tokenizers.word_splitter import JustSpacesWordSplitter
from curiosity.db import verify_checksum, create_sql, Fact
USER = "user"
ASSISTANT = "assistant"
DIALOG_ACT_LABELS = "dialog_act_labels"
class MultiLabelFieldListCompat(MultiLabelField):
"""
Fixes a bug where if the field is used in a ListField, that the
number of labels is lost and causes an error.
"""
@overrides
def empty_field(self):
return MultiLabelField(
[], self._label_namespace, skip_indexing=True, num_labels=self._num_labels
)
def to_long_field(nums: List[int]) -> ArrayField:
return ArrayField(np.array(nums, dtype=np.long), dtype=np.long)
@DatasetReader.register("curiosity_dialog")
class CuriosityDialogReader(DatasetReader):
def __init__(
self,
tokenizer: Tokenizer = None,
mention_tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
mention_indexers: Dict[str, TokenIndexer] = None,
):
super().__init__()
self._tokenizer = tokenizer or WordTokenizer()
self._token_indexers = token_indexers or {
"tokens": SingleIdTokenIndexer(lowercase_tokens=True),
}
self._mention_indexers = mention_indexers or {
"mentions": SingleIdTokenIndexer(),
}
self._mention_tokenizer = mention_tokenizer or WordTokenizer(
word_splitter=JustSpacesWordSplitter(),
)
self._fact_lookup: Optional[Dict[int, Fact]] = None
@overrides
def _read(self, file_path: str):
"""
file_path should point to a curiosity dialog file. In addition,
the directory that contains that file should also contain the
sqlite database associated with the dialogs named as below
- wiki_sql.sqlite.db
        The intent is that the dialog file and its fact database are
        distributed together.
"""
with open(file_path) as f:
dataset = json.load(f)
dialogs = dataset["dialogs"]
directory = os.path.dirname(file_path)
db_path = os.path.join(directory, "wiki_sql.sqlite.db")
engine, session = create_sql(db_path)
facts = session.query(Fact).all()
self._fact_lookup = {f.id: f for f in facts}
verify_checksum(dataset["db_checksum"], db_path)
# store = CuriosityStore(db_path)
# fact_lookup = store.get_fact_lookup()
# TODO: Add in facts
for _, d in enumerate(dialogs):
yield self.text_to_instance(d)
session.close()
@overrides
def text_to_instance(self, dialog: Dict, ignore_fact: bool = False):
msg_texts = []
msg_senders = []
msg_likes = []
msg_acts = []
msg_act_mask = []
msg_facts = []
msg_fact_labels = []
metadata_fact_labels = []
if len(dialog["messages"]) == 0:
raise ValueError("There are no dialog messages")
known_entities = [
Token(text="ENTITY/" + t.replace(" ", "_"), idx=idx)
for idx, t in enumerate(dialog["known_entities"])
]
if len(known_entities) == 0:
known_entities.append(Token(text="@@YOUKNOWNOTHING@@", idx=0))
known_entities_field = TextField(known_entities, self._mention_indexers)
focus_entity = dialog["focus_entity"]
focus_entity_field = TextField(
[Token(text="ENTITY/" + focus_entity.replace(" ", "_"), idx=0)],
self._mention_indexers,
)
for msg in dialog["messages"]:
tokenized_msg = self._tokenizer.tokenize(msg["message"])
msg_texts.append(TextField(tokenized_msg, self._token_indexers))
msg_senders.append(0 if msg["sender"] == USER else 1)
msg_likes.append(
LabelField(
"liked" if msg["liked"] else "not_liked",
label_namespace="like_labels",
)
)
if msg["dialog_acts"] is None:
dialog_acts = ["@@NODA@@"]
act_mask = 0
else:
dialog_acts = msg["dialog_acts"]
act_mask = 1
dialog_acts_field = MultiLabelFieldListCompat(
dialog_acts, label_namespace=DIALOG_ACT_LABELS
)
msg_acts.append(dialog_acts_field)
msg_act_mask.append(act_mask)
curr_facts_text = []
curr_facts_labels = []
curr_metadata_fact_labels = []
if msg["sender"] == ASSISTANT:
for idx, f in enumerate(msg["facts"]):
if ignore_fact:
fact_text = "dummy fact"
else:
fact = self._fact_lookup[f["fid"]]
fact_text = fact.text
# TODO: These are already space tokenized
tokenized_fact = self._tokenizer.tokenize(fact_text)
# 99% of text length is 77
tokenized_fact = tokenized_fact[:80]
curr_facts_text.append(
TextField(tokenized_fact, self._token_indexers)
)
if f["used"]:
curr_facts_labels.append(idx)
curr_metadata_fact_labels.append(idx)
else:
                # Users don't have facts, but let's avoid division by zero
curr_facts_text.append(
TextField([Token(text="@@NOFACT@@", idx=0)], self._token_indexers)
)
msg_facts.append(ListField(curr_facts_text))
# Add in a label if there are no correct indices
if len(curr_facts_labels) == 0:
curr_metadata_fact_labels.append(-1)
n_facts = len(curr_facts_text)
fact_label_arr = np.zeros(n_facts, dtype=np.float32)
if len(curr_facts_labels) > 0:
fact_label_arr[curr_facts_labels] = 1
msg_fact_labels.append(ArrayField(fact_label_arr, dtype=np.float32))
metadata_fact_labels.append(curr_metadata_fact_labels)
return Instance(
{
"messages": ListField(msg_texts),
"facts": ListField(msg_facts),
"fact_labels": ListField(msg_fact_labels),
"likes": ListField(msg_likes),
"dialog_acts": ListField(msg_acts),
"dialog_acts_mask": to_long_field(msg_act_mask),
"senders": to_long_field(msg_senders),
"focus_entity": focus_entity_field,
"known_entities": known_entities_field,
"metadata": MetadataField(
{
"dialog_id": dialog["dialog_id"],
"n_message": len(msg_texts),
"fact_labels": metadata_fact_labels,
"known_entities": dialog["known_entities"],
"focus_entity": dialog["focus_entity"],
}
),
}
)
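# Example (editor's sketch, hypothetical paths): reading instances from a
# dialog file whose directory also holds wiki_sql.sqlite.db:
#     reader = CuriosityDialogReader()
#     instances = reader.read("dialog_data/curiosity_dialogs.val.json")
#     print(instances[0]["metadata"]["dialog_id"])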
@DatasetReader.register("curiosity_paraphrase")
class CuriosityParaphraseReader(DatasetReader):
def __init__(
self,
filter_user_messages: bool = True,
filter_empty_facts: bool = True,
lazy: bool = False,
) -> None:
super().__init__(lazy)
self._tokenizer = WordTokenizer(
word_splitter=JustSpacesWordSplitter(),
)
self._token_indexers = {"tokens": SingleIdTokenIndexer()}
self._fact_lookup: Optional[Dict[int, Fact]] = None
self._filter_user_messages = filter_user_messages
self._filter_empty_facts = filter_empty_facts
@overrides
def _read(self, file_path: str):
"""
file_path should point to a curiosity dialog file. In addition,
the directory that contains that file should also contain the
sqlite database associated with the dialogs named as below
- wiki_sql.sqlite.db
"""
with open(file_path) as f:
dataset = json.load(f)
dialogs = dataset["dialogs"]
directory = os.path.dirname(file_path)
db_path = os.path.join(directory, "wiki_sql.sqlite.db")
engine, session = create_sql(db_path)
facts = session.query(Fact).all()
self._fact_lookup = {f.id: f for f in facts}
verify_checksum(dataset["db_checksum"], db_path)
# store = CuriosityStore(db_path)
# fact_lookup = store.get_fact_lookup()
# TODO: Add in facts
for d in dialogs:
for msg in d["messages"]:
# Filter out user messages
if self._filter_user_messages and msg["sender"] == USER:
continue
# Filter out messages without paired facts, if required
if self._filter_empty_facts:
facts_used = [f for f in msg["facts"] if f["used"]]
if len(facts_used) == 0:
continue
yield self.text_to_instance(msg, d)
session.close()
@overrides
def text_to_instance(self, msg: Dict, d: Dict):
# (1) Prepare facts
# Set max length of each fact text: 300 characters
fact_texts = [
self._fact_lookup[f["fid"]].text[:300] for f in msg["facts"] if f["used"]
]
# Aggregate facts
aggregated_fact_text = " ".join(fact_texts)
# If it doesn't have any fact, put a default symbol
if aggregated_fact_text == "":
aggregated_fact_text = "@@NOFACT@@"
# Wrap each sentence with start and end symbols
aggregated_fact_text = "{start_symbol} {text} {end_symbol}".format(
start_symbol=START_SYMBOL, text=aggregated_fact_text, end_symbol=END_SYMBOL
)
# Tokenize facts
tokenized_fact = self._tokenizer.tokenize(aggregated_fact_text)[:150]
# (2) Prepare messages
message = msg["message"] if msg["message"] != "" else "@@NOMESSAGE@@"
# Wrap each sentence with start and end symbols
message = "{start_symbol} {text} {end_symbol}".format(
start_symbol=START_SYMBOL, text=message, end_symbol=END_SYMBOL
)
# Tokenize
tokenized_message = self._tokenizer.tokenize(message)[:150]
# (3) Prepare dialog acts
dialog_acts = (
msg["dialog_acts"] if msg["dialog_acts"] is not None else ["@@NODA@@"]
)
# (4) Prepare sender information
sender = "user" if msg["sender"] == USER else "teacher"
return Instance(
{
"source_tokens": TextField(tokenized_fact, self._token_indexers),
"target_tokens": TextField(tokenized_message, self._token_indexers),
"dialog_acts": MultiLabelField(
dialog_acts, label_namespace=DIALOG_ACT_LABELS
),
"sender": LabelField(sender, label_namespace="sender"),
"metadata": MetadataField(
{
"dialog_id": d["dialog_id"],
"n_message": len(d["messages"]),
}
),
}
)
@DatasetReader.register("fact_paraphrase")
class FactParaphraseReader(DatasetReader):
def __init__(
self,
filter_empty_facts: bool = True,
lazy: bool = False,
) -> None:
super().__init__(lazy)
self._tokenizer = WordTokenizer(
word_splitter=JustSpacesWordSplitter(),
)
self._token_indexers = {"tokens": SingleIdTokenIndexer()}
self._fact_lookup: Optional[Dict[int, Fact]] = None
self._filter_empty_facts = filter_empty_facts
@overrides
def _read(self, file_path: str):
"""
file_path should point to a fact paraphrase dataset file.
It assumes the following format: {fact}\t{paraphrased}\n ...
"""
with open(file_path) as f:
reader = csv.reader(f, delimiter="\t")
for row in reader:
if self._filter_empty_facts and row[0] == "":
continue
yield self.text_to_instance(row)
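    # Example input row (editor's illustration), tab-separated:
    #     St. Louis was founded in 1764.\tDid you know St. Louis dates to 1764?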
@overrides
def text_to_instance(self, row: List):
# (1) Prepare facts
# Set max length of each fact text: 300 characters
fact = row[0][:300]
# If it doesn't have any fact, put a default symbol
if fact == "":
fact = "@@NOFACT@@"
# Tokenize facts
tokenized_fact = (
[Token(START_SYMBOL)]
+ self._tokenizer.tokenize(fact)[:150]
+ [Token(END_SYMBOL)]
)
# (2) Prepare the paraphrased message
message = row[1]
# Tokenize
tokenized_message = (
[Token(START_SYMBOL)]
+ self._tokenizer.tokenize(message)[:150]
+ [Token(END_SYMBOL)]
)
# (3) Prepare dialog acts
dialog_acts = ["@@NODA@@"]
# (4) Prepare sender information
sender = "teacher"
return Instance(
{
"source_tokens": TextField(tokenized_fact, self._token_indexers),
"target_tokens": TextField(tokenized_message, self._token_indexers),
"dialog_acts": MultiLabelField(
dialog_acts, label_namespace="dialog_acts"
),
"sender": LabelField(sender, label_namespace="sender"),
"metadata": MetadataField(
{
"dialog_id": -1,
"n_message": -1,
}
),
}
)
|
curiosity-main
|
curiosity/reader.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import json
from itertools import cycle
import click
import _jsonnet
from curiosity.stats import (
MajorityLikes,
TfidfFactBaseline,
MajorityDialogActs,
MajorityPolicyActs,
save_metrics,
fact_length_stats,
)
from curiosity.util import get_logger
log = get_logger(__name__)
TRAIN_DIALOGS = "dialog_data/curiosity_dialogs.train.json"
VAL_DIALOGS = "dialog_data/curiosity_dialogs.val.json"
TEST_DIALOGS = "dialog_data/curiosity_dialogs.test.json"
ZERO_DIALOGS = "dialog_data/curiosity_dialogs.test_zero.json"
@click.group()
def cli():
pass
@cli.command()
@click.argument("metrics_dir")
def majority(metrics_dir):
"""
Obtain a majority baseline for like prediction
"""
model = MajorityLikes()
model.train(TRAIN_DIALOGS)
val_score = model.score(VAL_DIALOGS)
test_score = model.score(TEST_DIALOGS)
zero_score = model.score(ZERO_DIALOGS)
log.info("Like prediction")
log.info(f"Validation Score: {val_score}")
log.info(f"Test Score: {test_score}")
log.info(f"Zero Score: {zero_score}")
save_metrics(
{
"best_validation_like_accuracy": val_score,
},
os.path.join(metrics_dir, "like_majority_val_metrics.json"),
)
save_metrics(
{
"best_validation_like_accuracy": test_score,
},
os.path.join(metrics_dir, "like_majority_test_metrics.json"),
)
save_metrics(
{
"best_validation_like_accuracy": zero_score,
},
os.path.join(metrics_dir, "like_majority_zero_metrics.json"),
)
@cli.command()
@click.argument("metrics_dir")
def majority_da(metrics_dir):
"""
Obtain a majority baseline for dialog acts prediction
"""
model = MajorityDialogActs()
model.train(TRAIN_DIALOGS)
val_score = model.score(VAL_DIALOGS)
test_score = model.score(TEST_DIALOGS)
zero_score = model.score(ZERO_DIALOGS)
log.info("Dialog Acts prediction")
log.info(f"Validation Score: {val_score}")
log.info(f"Test Score: {test_score}")
log.info(f"Zero Score: {zero_score}")
save_metrics(
{
"best_validation_da_micro_f1": val_score,
},
os.path.join(metrics_dir, "da_majority_val_metrics.json"),
)
save_metrics(
{
"best_validation_da_micro_f1": test_score,
},
os.path.join(metrics_dir, "da_majority_test_metrics.json"),
)
save_metrics(
{
"best_validation_da_micro_f1": zero_score,
},
os.path.join(metrics_dir, "da_majority_zero_metrics.json"),
)
@cli.command()
@click.argument("metrics_dir")
def majority_policy(metrics_dir):
"""
Obtain a majority baseline for policy acts prediction
"""
model = MajorityPolicyActs()
model.train(TRAIN_DIALOGS)
val_score = model.score(VAL_DIALOGS)
test_score = model.score(TEST_DIALOGS)
zero_score = model.score(ZERO_DIALOGS)
log.info("Policy Acts prediction")
log.info(f"Validation Score: {val_score}")
log.info(f"Test Score: {test_score}")
log.info(f"Zero Score: {zero_score}")
save_metrics(
{
"best_validation_policy_micro_f1": val_score,
},
os.path.join(metrics_dir, "policy_majority_val_metrics.json"),
)
save_metrics(
{
"best_validation_policy_micro_f1": test_score,
},
os.path.join(metrics_dir, "policy_majority_test_metrics.json"),
)
save_metrics(
{
"best_validation_policy_micro_f1": zero_score,
},
os.path.join(metrics_dir, "policy_majority_zero_metrics.json"),
)
@cli.command()
@click.argument("tfidf_path")
@click.argument("wiki_sql_path")
@click.argument("metrics_dir")
def fact_tfidf(tfidf_path, wiki_sql_path, metrics_dir):
"""
Train and evaluate a tfidf baseline in the same format as the allennlp
models
"""
model = TfidfFactBaseline(tfidf_path, wiki_sql_path=wiki_sql_path)
val_score = model.score(VAL_DIALOGS)
test_score = model.score(TEST_DIALOGS)
zero_score = model.score(ZERO_DIALOGS)
log.info("Fact Prediction")
log.info(f"Validation Score: {val_score}")
log.info(f"Test Score: {test_score}")
log.info(f"Zero Score: {zero_score}")
save_metrics(
{"best_validation_fact_mrr": val_score},
os.path.join(metrics_dir, "mrr_tfidf_val_metrics.json"),
)
save_metrics(
{"best_validation_fact_mrr": test_score},
os.path.join(metrics_dir, "mrr_tfidf_test_metrics.json"),
)
save_metrics(
{"best_validation_fact_mrr": zero_score},
os.path.join(metrics_dir, "mrr_tfidf_zero_metrics.json"),
)
@cli.command()
@click.argument("data_path")
def fact_lengths(data_path):
fact_length_stats(data_path)
@cli.command()
@click.option("--gpu", multiple=True)
@click.argument("metrics_dir")
def gen_configs(gpu, metrics_dir):
"""
Create the configuration files for the different models.
This is separate from hyper parameter tuning and directly
corresponds to models in the paper table
The gpu flag can be taken multiple times and indicates to write
jobs configured to use
those gpus.
"""
gpu_list = []
if isinstance(gpu, (tuple, list)):
if len(gpu) == 0:
gpu_list.append(-1)
else:
for e in gpu:
gpu_list.append(int(e))
elif isinstance(gpu, int):
gpu_list.append(gpu)
elif isinstance(gpu, str):
gpu_list.append(int(gpu))
else:
raise ValueError("wrong input type")
gpu_list = cycle(gpu_list)
# Note: key should be valid in a unix filename
# Values must be strings that represent jsonnet "code"
# These names are shared to the figure plotting code since the filename
# is based on it
all_configs = {
# This is the full model, so default params
"glove_bilstm": {},
        # Ablations, leaving one out
"glove_bilstm-known": {"disable_known_entities": "true"},
# Completely ablate out anything related to dialog acts
"glove_bilstm-da": {"disable_dialog_acts": "true"},
# Completely ablate out anything related to likes
"glove_bilstm-like": {"disable_likes": "true"},
        # Completely ablate out anything related to facts
"glove_bilstm-facts": {"disable_facts": "true"},
# This is the full model, so default params
"bert": {"use_glove": "false", "use_bert": "true"},
        # Ablations, leaving one out
"bert-known": {
"disable_known_entities": "true",
"use_glove": "false",
"use_bert": "true",
},
# Completely ablate out anything related to dialog acts
"bert-da": {
"disable_dialog_acts": "true",
"use_glove": "false",
"use_bert": "true",
},
# Completely ablate out anything related to likes
"bert-like": {
"disable_likes": "true",
"use_glove": "false",
"use_bert": "true",
},
        # Completely ablate out anything related to facts
"bert-facts": {
"disable_facts": "true",
"use_glove": "false",
"use_bert": "true",
},
}
with open("run_allennlp.sh", "w") as exp_f:
exp_f.write("#!/usr/bin/env bash\n")
for name, conf in all_configs.items():
model_conf: str = _jsonnet.evaluate_file(
"configs/model.jsonnet", tla_codes=conf
)
config_path = os.path.join("configs/generated", name + ".json")
model_path = os.path.join("models", name)
with open(config_path, "w") as f:
f.write(model_conf)
job_gpu = next(gpu_list)
if job_gpu != -1:
gpu_option = (
" -o '" + json.dumps({"trainer": {"cuda_device": job_gpu}}) + "'"
)
else:
gpu_option = ""
exp_f.write(f"# Experiments for: {name}\n")
exp_f.write(
f"allennlp train --include-package curiosity -s {model_path} -f {config_path}{gpu_option}\n"
)
val_out = os.path.join(metrics_dir, f"{name}_val_metrics.json")
test_out = os.path.join(metrics_dir, f"{name}_test_metrics.json")
zero_out = os.path.join(metrics_dir, f"{name}_zero_metrics.json")
exp_f.write(
f"allennlp evaluate --include-package curiosity{gpu_option} --output-file {val_out} {model_path} {VAL_DIALOGS}\n"
)
exp_f.write(
f"allennlp evaluate --include-package curiosity{gpu_option} --output-file {test_out} {model_path} {TEST_DIALOGS}\n"
)
exp_f.write(
f"allennlp evaluate --include-package curiosity{gpu_option} --output-file {zero_out} {model_path} {ZERO_DIALOGS}\n"
)
exp_f.write("\n")
@cli.command()
@click.argument("emb_path")
@click.argument("out_path")
def filter_emb(emb_path, out_path):
"""
Given a path to a valid pretrained embeddings file from wikipedia2vec,
filter out anything that is not an entity: IE not prefixed with ENTITY/
"""
with open(emb_path) as in_f:
rows = []
        _, dim = next(in_f).strip().split()
for line in in_f:
if line.startswith("ENTITY/"):
rows.append(line)
with open(out_path, "w") as out_f:
out_f.write(f"{len(rows)} {dim}\n")
for r in rows:
out_f.write(r)
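# Input format note (editor's addition): per the parsing above, wikipedia2vec
# text dumps begin with a "<n_vectors> <dim>" header, followed by one
# "<token> <floats...>" row per vector, entities being prefixed with ENTITY/;
# filter_emb keeps only those rows and rewrites the header with the new count.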
if __name__ == "__main__":
cli()
|
curiosity-main
|
curiosity/cli.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
"""
This file computes baseline accuracies based on majority
based voting
"""
from typing import Dict, Optional, List
import json
import numpy as np
import pandas as pd
from allennlp.data.tokenizers.token import Token
from curiosity.reader import CuriosityDialogReader
from curiosity.similarity import Similarity
from curiosity.util import get_logger
log = get_logger(__name__)
ASSISTANT_IDX = 1
def save_metrics(metrics: Dict, out_path: str):
"""
Save an allennlp compatible metrics dictionary
"""
out_dict = {
"best_epoch": 0,
"peak_cpu_memory_MB": 0,
"training_duration": "0:00:0",
"training_start_epoch": 0,
"training_epochs": 0,
"epoch": 0,
"training_like_accuracy": 0.0,
"training_loss": 0.0,
"training_cpu_memory_MB": 0.0,
"validation_like_accuracy": 0.0,
"validation_loss": 0.0,
"best_validation_like_accuracy": 0.0,
"best_validation_loss": 0.0,
}
for key, val in metrics.items():
out_dict[key] = val
with open(out_path, "w") as f:
json.dump(out_dict, f)
class MajorityLikes:
def __init__(self):
self._n_total_assistant_msgs = 0
self._n_liked_assistant_msgs = 0
self._like_all = True
def train(self, data_path: str) -> None:
log.info(f"Training majority classifier with: {data_path}")
self._n_total_assistant_msgs = 0
self._n_liked_assistant_msgs = 0
n_messages = 0
dialogs = CuriosityDialogReader().read(data_path)
log.info(f"N Dialogs: {len(dialogs)}")
for d in dialogs:
dialog_senders = d["senders"].array
dialog_likes = d["likes"]
for sender, liked in zip(dialog_senders, dialog_likes):
# Only care about assistant messages
if sender == ASSISTANT_IDX:
if liked.label == "liked":
self._n_liked_assistant_msgs += 1
self._n_total_assistant_msgs += 1
n_messages += 1
self._n_total_assistant_msgs = max(1, self._n_total_assistant_msgs)
log.info(f"N Liked Assistant Messages: {self._n_liked_assistant_msgs}")
log.info(f"N Total Assistant Messages: {self._n_total_assistant_msgs}")
log.info(f"N Total Messages: {n_messages}")
if (self._n_liked_assistant_msgs / self._n_total_assistant_msgs) > 0.5:
self._like_all = True
else:
self._like_all = False
log.info(f"Majority Class Liked: {self._like_all}")
def score(self, data_path: str) -> float:
log.info(f"Scoring majority classifier with: {data_path}")
dialogs = CuriosityDialogReader().read(data_path)
log.info(f"N Dialogs: {len(dialogs)}")
correct = 0
total = 0
n_messages = 0
for d in dialogs:
dialog_senders = d["senders"].array
dialog_likes = d["likes"]
for sender, liked in zip(dialog_senders, dialog_likes):
if sender == ASSISTANT_IDX:
label = liked.label
# If liked and majority class in training was liked
if label == "liked" and self._like_all:
correct += 1
                        # If not liked and majority class in training was not liked
                        elif label != "liked" and not self._like_all:
                            correct += 1
total += 1
n_messages += 1
log.info(f"N Correct Assistant Messages: {correct}")
log.info(f"N Total Assistant Messages: {total}")
log.info(f"N Total Messages: {n_messages}")
total = max(1, total)
return correct / total
class MajorityDialogActs:
def __init__(self):
self._n_total_assistant_msgs = 0
self._n_total_acts = 0
self._count_per_turn = {}
self._majority_per_turn = {}
self._count = {}
self._majority = None
def train(self, data_path: str) -> None:
log.info(f"Training majority classifier with: {data_path}")
self._n_total_assistant_msgs = 0
n_messages = 0
dialogs = CuriosityDialogReader().read(data_path)
log.info(f"N Dialogs: {len(dialogs)}")
for d in dialogs:
dialog_senders = d["senders"].array
dialog_acts_list = d["dialog_acts"]
for i in range(len(dialog_senders)):
sender = dialog_senders[i]
acts = dialog_acts_list[i].labels
                # Only care about user messages here
if sender != ASSISTANT_IDX:
# Histogram stat per turn
if i not in self._count_per_turn:
self._count_per_turn[i] = {}
for act in acts:
# Histogram stat per turn
self._count_per_turn[i][act] = (
self._count_per_turn[i].get(act, 0) + 1
)
# Histogram stat overall
self._count[act] = self._count.get(act, 0) + 1
# Total count
self._n_total_acts += 1
self._n_total_assistant_msgs += 1
n_messages += 1
self._n_total_assistant_msgs = max(1, self._n_total_assistant_msgs)
log.info(f"N Total User Messages: {self._n_total_acts}")
log.info(f"N Total Acts: {self._n_total_assistant_msgs}")
log.info(f"N Total Messages: {n_messages}")
# Sort count overall
lst = [(count, act) for act, count in self._count.items()]
lst.sort(reverse=True)
        # Majority act overall
self._majority = lst[0][1]
for turn_idx, act_stat in self._count_per_turn.items():
# Sort count_per_turn for each turn_idx
lst = [(count, act) for act, count in act_stat.items()]
lst.sort(reverse=True)
if len(lst) != 0:
majority_act = lst[0][1]
else:
majority_act = self._majority
# Majority act in this turn
self._majority_per_turn[turn_idx] = majority_act
print("Turn: %d, Majority Act: %s" % (turn_idx, majority_act))
log.info(f"Majority Act: {self._majority}")
log.info(f"Majority Map: {self._majority_per_turn}")
log.info(f"Count Map Per Turn: {self._count_per_turn}")
log.info(f"Count Map: {self._count}")
def score(self, data_path: str) -> float:
log.info(f"Scoring majority classifier with: {data_path}")
dialogs = CuriosityDialogReader().read(data_path)
log.info(f"N Dialogs: {len(dialogs)}")
correct = 0
total = 0
n_messages = 0
for d in dialogs:
dialog_senders = d["senders"].array
dialog_acts_list = d["dialog_acts"]
for i in range(len(dialog_senders)):
sender = dialog_senders[i]
acts = dialog_acts_list[i].labels
if sender != ASSISTANT_IDX:
for act in acts:
if i in self._majority_per_turn:
if act == self._majority_per_turn[i]:
correct += 1
else:
if act == self._majority:
correct += 1
total += len(acts)
n_messages += 1
log.info(f"N Correct Acts: {correct}")
log.info(f"N Total Acts: {total}")
log.info(f"N Total Messages: {n_messages}")
total = max(1, total)
n_messages = max(1, n_messages)
p = correct / n_messages # assumes 1 prediction per message
r = correct / total
f1 = 2 * (p * r) / (p + r)
return f1
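    # Worked example (editor's addition): with 8 user messages, 10 gold acts,
    # and 6 correct majority guesses, p = 6/8 = 0.75, r = 6/10 = 0.6, and
    # f1 = 2 * (0.75 * 0.6) / (0.75 + 0.6) = 0.667 (rounded), i.e. micro-F1
    # under the one-prediction-per-message assumption noted above.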
class MajorityPolicyActs:
def __init__(self):
self._n_total_assistant_msgs = 0
self._n_total_acts = 0
self._count_per_turn = {}
self._majority_per_turn = {}
self._count = {}
self._majority = None
def train(self, data_path: str) -> None:
log.info(f"Training majority classifier with: {data_path}")
self._n_total_assistant_msgs = 0
n_messages = 0
dialogs = CuriosityDialogReader().read(data_path)
log.info(f"N Dialogs: {len(dialogs)}")
for d in dialogs:
dialog_senders = d["senders"].array
dialog_acts_list = d["dialog_acts"]
for i in range(len(dialog_senders)):
sender = dialog_senders[i]
acts = dialog_acts_list[i].labels
# Histogram stat per turn
if i not in self._count_per_turn:
self._count_per_turn[i] = {}
# Only care about assistant messages
if sender == ASSISTANT_IDX:
for act in acts:
# Histogram stat per turn
self._count_per_turn[i][act] = (
self._count_per_turn[i].get(act, 0) + 1
)
# Histogram stat overall
self._count[act] = self._count.get(act, 0) + 1
# Total count
self._n_total_acts += 1
self._n_total_assistant_msgs += 1
n_messages += 1
self._n_total_assistant_msgs = max(1, self._n_total_assistant_msgs)
log.info(f"N Total Assistant Messages: {self._n_total_acts}")
log.info(f"N Total Acts: {self._n_total_assistant_msgs}")
log.info(f"N Total Messages: {n_messages}")
# Sort count overall
lst = [(count, act) for act, count in self._count.items()]
lst.sort(reverse=True)
        # Majority act overall
self._majority = lst[0][1]
for turn_idx, act_stat in self._count_per_turn.items():
# Sort count_per_turn for each turn_idx
lst = [(count, act) for act, count in act_stat.items()]
lst.sort(reverse=True)
if len(lst) != 0:
majority_act = lst[0][1]
else:
majority_act = self._majority
# Majority act in this turn
self._majority_per_turn[turn_idx] = majority_act
print("Turn: %d, Majority Act: %s" % (turn_idx, majority_act))
log.info(f"Majority Act: {self._majority}")
log.info(f"Majority Map: {self._majority_per_turn}")
log.info(f"Count Map Per Turn: {self._count_per_turn}")
log.info(f"Count Map: {self._count}")
def score(self, data_path: str) -> float:
log.info(f"Scoring majority classifier with: {data_path}")
dialogs = CuriosityDialogReader().read(data_path)
log.info(f"N Dialogs: {len(dialogs)}")
correct = 0
total = 0
n_messages = 0
for d in dialogs:
dialog_senders = d["senders"].array
dialog_acts_list = d["dialog_acts"]
for i in range(len(dialog_senders)):
sender = dialog_senders[i]
acts = dialog_acts_list[i].labels
if sender == ASSISTANT_IDX:
for act in acts:
if i in self._majority_per_turn:
if act == self._majority_per_turn[i]:
correct += 1
else:
if act == self._majority:
correct += 1
total += len(acts)
n_messages += 1
log.info(f"N Correct Acts: {correct}")
log.info(f"N Total Acts: {total}")
log.info(f"N Total Messages: {n_messages}")
total = max(1, total)
n_messages = max(1, n_messages)
p = correct / n_messages # assumes 1 prediction per message
r = correct / total
f1 = 2 * (p * r) / (p + r)
return f1
def tokens_to_str(tokens: List[Token]) -> str:
return " ".join(t.text for t in tokens)
class TfidfFactBaseline:
"""
    Implements a simplistic baseline. This uses the tfidf vectorizer
    fit for ranking facts shown to annotators. The highest similarity
    fact is selected as the used fact. For the metric, the mean
    reciprocal rank (MRR) of the used facts is computed. In the data,
    more than one fact is rarely used, even if it is possible to do so.
"""
    def __init__(self, tfidf_path: str, wiki_sql_path: Optional[str] = None):
        # wiki_sql_path is accepted for interface parity but unused here,
        # since the vectorizer is loaded pre-fit from tfidf_path
        self._similarity = Similarity()
        self._similarity.load(tfidf_path)
def score(self, data_path: str):
dialogs = CuriosityDialogReader().read(data_path)
n_assistant_messages = 0
all_rr = []
for d in dialogs:
msg_history = []
dialog_senders = d["senders"].array
dialog_facts = d["facts"]
dialog_fact_labels = d["fact_labels"]
dialog_messages = d["messages"]
for msg, sender, facts, fact_labels in zip(
dialog_messages, dialog_senders, dialog_facts, dialog_fact_labels
):
if sender == ASSISTANT_IDX:
context = " ".join(msg_history)
fact_texts = [tokens_to_str(tokens) for tokens in facts]
doc_scores = self._similarity.score(context, fact_texts)
# First get a list where first position is maximal score
sorted_scores = np.argsort(-np.array(doc_scores))
exists_rel_doc = False
best_rank = None
for rel_idx in fact_labels.array:
if rel_idx != -1:
# Then find the rank + 1 of the relevant doc
exists_rel_doc = True
rank = np.where(sorted_scores == rel_idx)[0][0] + 1
# We only care about the best rank, if there are multiple
# relevant docs
if best_rank is None or rank < best_rank:
best_rank = rank
# Ignore this example if there is no relevant doc
if exists_rel_doc:
all_rr.append(1 / best_rank)
n_assistant_messages += 1
# Only add the actually used message after prediction
# Add user and assistant messages
msg_text = tokens_to_str(msg.tokens)
msg_history.append(msg_text)
mean_rr = np.mean(all_rr)
log.info(f"Msgs with Facts: {len(all_rr)}")
log.info(f"Total Assistant Msgs: {n_assistant_messages}")
log.info(f"MRR: {mean_rr}")
return mean_rr
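# Worked example (editor's addition): if the used fact is ranked 1st in one
# assistant message and 4th in another, MRR = (1/1 + 1/4) / 2 = 0.625.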
def fact_length_stats(data_path: str):
dialogs = CuriosityDialogReader().read(data_path)
fact_lengths = []
for d in dialogs:
for facts in d["facts"]:
for f in facts:
fact_lengths.append({"n_tokens": f.sequence_length()})
df = pd.DataFrame(fact_lengths)
summary = df.describe(percentiles=[0.25, 0.5, 0.75, 0.8, 0.9, 0.95, 0.99])
log.info(f"Summary\n{summary}")
|
curiosity-main
|
curiosity/stats.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Dict, Union
import torch
from pytorch_pretrained_bert.modeling import BertModel
from allennlp.modules.token_embedders.bert_token_embedder import PretrainedBertModel
class BertEncoder(torch.nn.Module):
"""
Adapted from https://github.com/allenai/allennlp/blob/v0.8.5/allennlp/models/bert_for_classification.py
and https://github.com/allenai/allennlp/blob/master/allennlp/modules/seq2vec_encoders/bert_pooler.py#L14-L67
I ran into a lot of trouble trying to get this to work more generically and gave up
to just implement as a manual switch
"""
def __init__(
self,
bert_model: Union[str, BertModel],
requires_grad: bool = True,
index: str = "bert",
) -> None:
super().__init__()
if isinstance(bert_model, str):
self.bert_model = PretrainedBertModel.load(bert_model)
else:
self.bert_model = bert_model
for param in self.bert_model.parameters():
param.requires_grad = requires_grad
self._embedding_dim = self.bert_model.config.hidden_size
self._index = index
def forward(self, tokens: Dict[str, torch.LongTensor]) -> torch.Tensor:
# pylint: disable=arguments-differ
input_ids = tokens[self._index]
token_type_ids = tokens[f"{self._index}-type-ids"]
input_mask = (input_ids != 0).long()
# transformers lib doesn't like extra dimensions, and TimeDistributed
# expects a tensor
# This works since we only need independent encodings of each piece of text
if input_ids.dim() > 2:
shape = input_ids.shape
word_dim = shape[-1]
reshaped_input_ids = input_ids.view(-1, word_dim)
reshaped_token_type_ids = token_type_ids.view(-1, word_dim)
reshaped_input_mask = input_mask.view(-1, word_dim)
_, reshaped_pooled = self.bert_model(
input_ids=reshaped_input_ids,
token_type_ids=reshaped_token_type_ids,
attention_mask=reshaped_input_mask,
)
pooled = reshaped_pooled.view(shape[:-1] + (-1,))
else:
_, pooled = self.bert_model(
input_ids=input_ids,
token_type_ids=token_type_ids,
attention_mask=input_mask,
)
# Current mask is wordpiece mask, we want an utterance mask
# So search for utterances with all masked wordpieces
utter_mask = (input_mask.sum(dim=-1) != 0).long()
return pooled, utter_mask
def get_output_dim(self) -> int:
return self._embedding_dim
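# Shape walkthrough (editor's sketch): for fact inputs shaped
# (batch, n_turns, n_facts, n_words), the ids are flattened to
# (batch * n_turns * n_facts, n_words) before BERT, and the pooled [CLS]
# vectors are reshaped back to (batch, n_turns, n_facts, hidden_size). The
# returned utter_mask is 0 exactly for texts whose wordpieces are all padding.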
|
curiosity-main
|
curiosity/bert.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Reader for curiosity dialog dataset. Below is a sample json with relevant structure
{
"dialogs": [
{
"messages": [
{
"message": "Hi, what do you know about St. Louis' history?",
"liked": false,
"sender": "user",
"facts": []
},
{
"message": "St. Louis had among worst air pollution in U.S.?",
"liked": true,
"sender": "assistant",
"facts": [
{
"fid": 54538,
"used": true
},
{
"fid": 54472,
"used": false
},
{
"fid": 54490,
"used": false
},
{
"fid": 54701,
"used": false
},
{
"fid": 54646,
"used": false
},
{
"fid": 54681,
"used": false
},
{
"fid": 54746,
"used": false
},
{
"fid": 54523,
"used": false
},
{
"fid": 54526,
"used": false
}
]
},
],
"known_entities": [
"Major League Baseball",
"United Kingdom",
"United States",
"United States Census Bureau",
"Missouri River"
],
"focus_entity": "St. Louis",
"dialog_id": 77,
"inferred_steps": false,
"created_time": 1568060716,
"aspects": [
"History",
"Education"
],
"first_aspect": "History",
"second_aspect": "Education",
"shuffle_facts": true,
"related_entities": [
"Auguste Chouteau",
"Spain",
"Susan Polgar",
"Darby, Pennsylvania",
"MacArthur Bridge (St. Louis)",
"Christ Church Cathedral, Oxford",
"Mound City, Illinois",
"Major League Baseball",
"United Kingdom",
"United States",
"Washington University in St. Louis",
"United States Census Bureau",
"Greater St. Louis",
"Missouri River"
]
}
]
}
"""
from typing import Dict, Optional, List
import json
import csv
import os
import numpy as np
from overrides import overrides
from allennlp.common.util import START_SYMBOL, END_SYMBOL
from allennlp.data.instance import Instance
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.fields import (
TextField, ListField, MetadataField, LabelField, ArrayField, MultiLabelField
)
from allennlp.data.tokenizers import Token, Tokenizer, WordTokenizer
from allennlp.data.tokenizers.word_splitter import JustSpacesWordSplitter
from curiosity.db import verify_checksum, create_sql, Fact
USER = 'user'
ASSISTANT = 'assistant'
DIALOG_ACT_LABELS = 'dialog_act_labels'
MESSAGE_CUMULATIVE = False
DIALOG_MAX_LENGTH = 80
class MultiLabelFieldListCompat(MultiLabelField):
"""
Fixes a bug where if the field is used in a ListField, that the
number of labels is lost and causes an error.
"""
@overrides
def empty_field(self):
return MultiLabelField(
[], self._label_namespace,
skip_indexing=True,
num_labels=self._num_labels
)
def to_long_field(nums: List[int]) -> ArrayField:
return ArrayField(np.array(nums, dtype=np.long), dtype=np.long)
@DatasetReader.register('baseline_curiosity_dialog')
class BaselineCuriosityDialogReader(DatasetReader):
def __init__(self,
tokenizer: Tokenizer = None,
mention_tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
mention_indexers: Dict[str, TokenIndexer] = None):
super().__init__()
self._tokenizer = tokenizer or WordTokenizer()
self._token_indexers = token_indexers or {
'tokens': SingleIdTokenIndexer(lowercase_tokens=True),
}
self._mention_indexers = mention_indexers or {
'mentions': SingleIdTokenIndexer(),
}
self._mention_tokenizer = mention_tokenizer or WordTokenizer(
word_splitter=JustSpacesWordSplitter(),
)
self._fact_lookup: Optional[Dict[int, Fact]] = None
@overrides
def _read(self, file_path: str):
"""
file_path should point to a curiosity dialog file. In addition,
the directory that contains that file should also contain the
        sqlite database associated with the dialogs, named as below:
        - wiki_sql.sqlite.db
        The intent is that the dialog file and the fact database it
        references are always kept together, so that fact ids can be
        resolved locally and the database checksum can be verified.
"""
with open(file_path) as f:
dataset = json.load(f)
dialogs = dataset['dialogs']
directory = os.path.dirname(file_path)
db_path = os.path.join(directory, 'wiki_sql.sqlite.db')
engine, session = create_sql(db_path)
facts = (
session
.query(Fact)
.all()
)
self._fact_lookup = {f.id: f for f in facts}
verify_checksum(dataset['db_checksum'], db_path)
# store = CuriosityStore(db_path)
# fact_lookup = store.get_fact_lookup()
# TODO: Add in facts
for _, d in enumerate(dialogs):
yield self.text_to_instance(d)
session.close()
@overrides
def text_to_instance(self, dialog: Dict, ignore_fact: bool = False):
msg_texts = []
msg_senders = []
msg_likes = []
msg_acts = []
msg_act_mask = []
msg_facts = []
msg_fact_labels = []
metadata_fact_labels = []
if len(dialog['messages']) == 0:
raise ValueError('There are no dialog messages')
known_entities = [
Token(text='ENTITY/' + t.replace(' ', '_'), idx=idx)
for idx, t in enumerate(dialog['known_entities'])
]
if len(known_entities) == 0:
known_entities.append(Token(text='@@YOUKNOWNOTHING@@', idx=0))
known_entities_field = TextField(known_entities, self._mention_indexers)
focus_entity = dialog['focus_entity']
focus_entity_field = TextField(
[Token(text='ENTITY/' + focus_entity.replace(' ', '_'), idx=0)],
self._mention_indexers
)
prev_msg = ''
for msg in dialog['messages']:
if MESSAGE_CUMULATIVE:
if prev_msg == '':
cur_message = msg['message']
else:
if len(prev_msg) > DIALOG_MAX_LENGTH:
prev_msg = ' '.join(prev_msg[-DIALOG_MAX_LENGTH:].split(' ')[1:])
cur_message = prev_msg + ' ' + msg['message']
prev_msg = cur_message
else:
cur_message = msg['message']
tokenized_msg = self._tokenizer.tokenize(cur_message)
msg_texts.append(TextField(tokenized_msg, self._token_indexers))
msg_senders.append(0 if msg['sender'] == USER else 1)
msg_likes.append(LabelField(
'liked' if msg['liked'] else 'not_liked',
label_namespace='like_labels'
))
if msg['dialog_acts'] is None:
dialog_acts = ['@@NODA@@']
act_mask = 0
else:
dialog_acts = msg['dialog_acts']
act_mask = 1
dialog_acts_field = MultiLabelFieldListCompat(
dialog_acts, label_namespace=DIALOG_ACT_LABELS)
msg_acts.append(dialog_acts_field)
msg_act_mask.append(act_mask)
curr_facts_text = []
curr_facts_labels = []
curr_metadata_fact_labels = []
if msg['sender'] == ASSISTANT:
for idx, f in enumerate(msg['facts']):
if ignore_fact:
fact_text = 'dummy fact'
else:
fact = self._fact_lookup[f['fid']]
fact_text = fact.text
# TODO: These are already space tokenized
tokenized_fact = self._tokenizer.tokenize(fact_text)
                    # The 99th percentile of fact text length is 77 tokens
tokenized_fact = tokenized_fact[:DIALOG_MAX_LENGTH]
curr_facts_text.append(
TextField(tokenized_fact, self._token_indexers)
)
if f['used']:
curr_facts_labels.append(idx)
curr_metadata_fact_labels.append(idx)
else:
                # Users don't have facts, but let's avoid a divide by zero
curr_facts_text.append(TextField(
[Token(text='@@NOFACT@@', idx=0)],
self._token_indexers
))
msg_facts.append(ListField(curr_facts_text))
# Add in a label if there are no correct indices
if len(curr_facts_labels) == 0:
curr_metadata_fact_labels.append(-1)
n_facts = len(curr_facts_text)
fact_label_arr = np.zeros(n_facts, dtype=np.float32)
if len(curr_facts_labels) > 0:
fact_label_arr[curr_facts_labels] = 1
msg_fact_labels.append(ArrayField(fact_label_arr, dtype=np.float32))
metadata_fact_labels.append(curr_metadata_fact_labels)
return Instance({
'messages': ListField(msg_texts),
'facts': ListField(msg_facts),
'fact_labels': ListField(msg_fact_labels),
'likes': ListField(msg_likes),
'dialog_acts': ListField(msg_acts),
'dialog_acts_mask': to_long_field(msg_act_mask),
'senders': to_long_field(msg_senders),
'focus_entity': focus_entity_field,
'known_entities': known_entities_field,
'metadata': MetadataField({
'dialog_id': dialog['dialog_id'],
'n_message': len(msg_texts),
'fact_labels': metadata_fact_labels,
'known_entities': dialog['known_entities'],
'focus_entity': dialog['focus_entity']
})
})
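# A minimal, self-contained sketch (not part of the original readers) of
# the history-truncation rule used above when messages are accumulated:
# the history is first cut to its last DIALOG_MAX_LENGTH characters and
# the (likely partial) leading word is then dropped.
def _truncate_history_example(prev_msg: str,
                              max_length: int = DIALOG_MAX_LENGTH) -> str:
    # Hypothetical helper name; mirrors the inline logic in
    # text_to_instance but is not used by the readers themselves.
    if len(prev_msg) > max_length:
        return ' '.join(prev_msg[-max_length:].split(' ')[1:])
    return prev_msg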
@DatasetReader.register('multi_turn_baseline_curiosity_dialog')
class MultiTurnBaselineCuriosityDialogReader(DatasetReader):
def __init__(self,
tokenizer: Tokenizer = None,
mention_tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
mention_indexers: Dict[str, TokenIndexer] = None):
super().__init__()
self._tokenizer = tokenizer or WordTokenizer()
self._token_indexers = token_indexers or {
'tokens': SingleIdTokenIndexer(lowercase_tokens=True),
}
self._mention_indexers = mention_indexers or {
'mentions': SingleIdTokenIndexer(),
}
self._mention_tokenizer = mention_tokenizer or WordTokenizer(
word_splitter=JustSpacesWordSplitter(),
)
self._fact_lookup: Optional[Dict[int, Fact]] = None
@overrides
def _read(self, file_path: str):
"""
file_path should point to a curiosity dialog file. In addition,
the directory that contains that file should also contain the
        sqlite database associated with the dialogs, named as below:
        - wiki_sql.sqlite.db
        The intent is that the dialog file and the fact database it
        references are always kept together, so that fact ids can be
        resolved locally and the database checksum can be verified.
"""
with open(file_path) as f:
dataset = json.load(f)
dialogs = dataset['dialogs']
directory = os.path.dirname(file_path)
db_path = os.path.join(directory, 'wiki_sql.sqlite.db')
engine, session = create_sql(db_path)
facts = (
session
.query(Fact)
.all()
)
self._fact_lookup = {f.id: f for f in facts}
verify_checksum(dataset['db_checksum'], db_path)
# store = CuriosityStore(db_path)
# fact_lookup = store.get_fact_lookup()
# TODO: Add in facts
for _, d in enumerate(dialogs):
yield self.text_to_instance(d)
session.close()
@overrides
def text_to_instance(self, dialog: Dict, ignore_fact: bool = False):
msg_texts = []
msg_senders = []
msg_likes = []
msg_acts = []
msg_act_mask = []
msg_facts = []
msg_fact_labels = []
metadata_fact_labels = []
if len(dialog['messages']) == 0:
raise ValueError('There are no dialog messages')
known_entities = [
Token(text='ENTITY/' + t.replace(' ', '_'), idx=idx)
for idx, t in enumerate(dialog['known_entities'])
]
if len(known_entities) == 0:
known_entities.append(Token(text='@@YOUKNOWNOTHING@@', idx=0))
known_entities_field = TextField(known_entities, self._mention_indexers)
focus_entity = dialog['focus_entity']
focus_entity_field = TextField(
[Token(text='ENTITY/' + focus_entity.replace(' ', '_'), idx=0)],
self._mention_indexers
)
prev_msg = ''
for msg in dialog['messages']:
            # This reader always accumulates dialog history, i.e. the
            # MESSAGE_CUMULATIVE behavior of the baseline reader is
            # unconditionally enabled here.
            if prev_msg == '':
                cur_message = msg['message']
            else:
                if len(prev_msg) > DIALOG_MAX_LENGTH:
                    prev_msg = ' '.join(prev_msg[-DIALOG_MAX_LENGTH:].split(' ')[1:])
                cur_message = prev_msg + ' ' + msg['message']
            prev_msg = cur_message
tokenized_msg = self._tokenizer.tokenize(cur_message)
msg_texts.append(TextField(tokenized_msg, self._token_indexers))
msg_senders.append(0 if msg['sender'] == USER else 1)
msg_likes.append(LabelField(
'liked' if msg['liked'] else 'not_liked',
label_namespace='like_labels'
))
if msg['dialog_acts'] is None:
dialog_acts = ['@@NODA@@']
act_mask = 0
else:
dialog_acts = msg['dialog_acts']
act_mask = 1
dialog_acts_field = MultiLabelFieldListCompat(
dialog_acts, label_namespace=DIALOG_ACT_LABELS)
msg_acts.append(dialog_acts_field)
msg_act_mask.append(act_mask)
curr_facts_text = []
curr_facts_labels = []
curr_metadata_fact_labels = []
if msg['sender'] == ASSISTANT:
for idx, f in enumerate(msg['facts']):
if ignore_fact:
fact_text = 'dummy fact'
else:
fact = self._fact_lookup[f['fid']]
fact_text = fact.text
# TODO: These are already space tokenized
tokenized_fact = self._tokenizer.tokenize(fact_text)
                    # The 99th percentile of fact text length is 77 tokens
tokenized_fact = tokenized_fact[:DIALOG_MAX_LENGTH]
curr_facts_text.append(
TextField(tokenized_fact, self._token_indexers)
)
if f['used']:
curr_facts_labels.append(idx)
curr_metadata_fact_labels.append(idx)
else:
                # Users don't have facts, but let's avoid a divide by zero
curr_facts_text.append(TextField(
[Token(text='@@NOFACT@@', idx=0)],
self._token_indexers
))
msg_facts.append(ListField(curr_facts_text))
# Add in a label if there are no correct indices
if len(curr_facts_labels) == 0:
curr_metadata_fact_labels.append(-1)
n_facts = len(curr_facts_text)
fact_label_arr = np.zeros(n_facts, dtype=np.float32)
if len(curr_facts_labels) > 0:
fact_label_arr[curr_facts_labels] = 1
msg_fact_labels.append(ArrayField(fact_label_arr, dtype=np.float32))
metadata_fact_labels.append(curr_metadata_fact_labels)
return Instance({
'messages': ListField(msg_texts),
'facts': ListField(msg_facts),
'fact_labels': ListField(msg_fact_labels),
'likes': ListField(msg_likes),
'dialog_acts': ListField(msg_acts),
'dialog_acts_mask': to_long_field(msg_act_mask),
'senders': to_long_field(msg_senders),
'focus_entity': focus_entity_field,
'known_entities': known_entities_field,
'metadata': MetadataField({
'dialog_id': dialog['dialog_id'],
'n_message': len(msg_texts),
'fact_labels': metadata_fact_labels,
'known_entities': dialog['known_entities'],
'focus_entity': dialog['focus_entity']
})
})
|
curiosity-main
|
curiosity/baseline_reader.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Dict, List, Tuple, Optional
import numpy
from overrides import overrides
import torch
import torch.nn.functional as F
from torch.nn.modules.linear import Linear
from torch.nn.modules.rnn import LSTMCell
from torch.nn import EmbeddingBag, Sequential
from allennlp.common.checks import ConfigurationError
from allennlp.common.util import START_SYMBOL, END_SYMBOL
from allennlp.data.vocabulary import Vocabulary
from allennlp.modules.attention import LegacyAttention
from allennlp.modules import Attention, TextFieldEmbedder, Seq2SeqEncoder
from allennlp.modules.similarity_functions import SimilarityFunction
from allennlp.models.model import Model
from allennlp.modules.token_embedders import Embedding
from allennlp.modules.feedforward import FeedForward
from allennlp.nn import util, RegularizerApplicator
from allennlp.nn.beam_search import BeamSearch
from allennlp.training.metrics import BLEU
@Model.register("curiosity_paraphrase_seq2seq")
class FactParaphraseSeq2Seq(Model):
"""
Given facts and dialog acts, it generates the paraphrased message.
TODO: add dialog & dialog acts history
This implementation is based off the default SimpleSeq2Seq model,
which takes a sequence, encodes it, and then uses the encoded
representations to decode another sequence.
"""
def __init__(
self,
vocab: Vocabulary,
source_embedder: TextFieldEmbedder,
source_encoder: Seq2SeqEncoder,
max_decoding_steps: int,
dialog_acts_encoder: FeedForward = None,
attention: Attention = None,
attention_function: SimilarityFunction = None,
n_dialog_acts: int = None,
beam_size: int = None,
target_namespace: str = "tokens",
target_embedding_dim: int = None,
scheduled_sampling_ratio: float = 0.0,
use_bleu: bool = True,
use_dialog_acts: bool = True,
regularizers: Optional[RegularizerApplicator] = None,
) -> None:
super().__init__(vocab, regularizers)
self._target_namespace = target_namespace
self._scheduled_sampling_ratio = scheduled_sampling_ratio
# We need the start symbol to provide as the input at the first
# timestep of decoding, and end symbol as a way to indicate the end
# of the decoded sequence.
self._start_index = self.vocab.get_token_index(
START_SYMBOL, self._target_namespace
)
self._end_index = self.vocab.get_token_index(END_SYMBOL, self._target_namespace)
if use_bleu:
pad_index = self.vocab.get_token_index(
self.vocab._padding_token, self._target_namespace
)
self._bleu = BLEU(
exclude_indices={pad_index, self._end_index, self._start_index}
)
else:
self._bleu = None
# At prediction time, we use a beam search to find the most
# likely sequence of target tokens.
beam_size = beam_size or 1
self._max_decoding_steps = max_decoding_steps
self._beam_search = BeamSearch(
self._end_index, max_steps=max_decoding_steps, beam_size=beam_size
)
# Dense embedding of source (Facts) vocab tokens.
self._source_embedder = source_embedder
# Encodes the sequence of source embeddings into a sequence of hidden states.
self._source_encoder = source_encoder
if use_dialog_acts:
# Dense embedding of dialog acts.
da_embedding_dim = dialog_acts_encoder.get_input_dim()
self._dialog_acts_embedder = EmbeddingBag(n_dialog_acts, da_embedding_dim)
# Encodes dialog acts
self._dialog_acts_encoder = dialog_acts_encoder
else:
self._dialog_acts_embedder = None
self._dialog_acts_encoder = None
num_classes = self.vocab.get_vocab_size(self._target_namespace)
# Attention mechanism applied to the encoder output for each step.
if attention:
if attention_function:
raise ConfigurationError(
"You can only specify an attention module or an "
"attention function, but not both."
)
self._attention = attention
elif attention_function:
self._attention = LegacyAttention(attention_function)
else:
self._attention = None
# Dense embedding of vocab words in the target space.
target_embedding_dim = target_embedding_dim or source_embedder.get_output_dim()
self._target_embedder = Embedding(num_classes, target_embedding_dim)
# Decoder output dim needs to be the same as the encoder output dim
# since we initialize the hidden state of the decoder with the final
# hidden state of the encoder.
self._encoder_output_dim = self._source_encoder.get_output_dim()
if use_dialog_acts:
self._merge_encoder = Sequential(
Linear(
self._source_encoder.get_output_dim()
+ self._dialog_acts_encoder.get_output_dim(),
self._encoder_output_dim,
)
)
self._decoder_output_dim = self._encoder_output_dim
if self._attention:
# If using attention, a weighted average over encoder outputs will
# be concatenated to the previous target embedding to form the input
# to the decoder at each time step.
self._decoder_input_dim = self._decoder_output_dim + target_embedding_dim
else:
# Otherwise, the input to the decoder is just the previous target embedding.
self._decoder_input_dim = target_embedding_dim
# We'll use an LSTM cell as the recurrent cell that produces a hidden state
# for the decoder at each time step.
# TODO (pradeep): Do not hardcode decoder cell type.
self._decoder_cell = LSTMCell(self._decoder_input_dim, self._decoder_output_dim)
# We project the hidden state from the decoder into the output vocabulary space
# in order to get log probabilities of each target token, at each time step.
self._output_projection_layer = Linear(self._decoder_output_dim, num_classes)
def take_step(
self, last_predictions: torch.Tensor, state: Dict[str, torch.Tensor]
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
"""
Take a decoding step. This is called by the beam search class.
"""
# shape: (group_size, num_classes)
output_projections, state = self._prepare_output_projections(
last_predictions, state
)
# shape: (group_size, num_classes)
class_log_probabilities = F.log_softmax(output_projections, dim=-1)
return class_log_probabilities, state
@overrides
def forward(
self, # type: ignore
source_tokens: Dict[str, torch.LongTensor],
target_tokens: Dict[str, torch.LongTensor] = None,
dialog_acts: Optional[torch.Tensor] = None,
sender: Optional[torch.Tensor] = None,
metadata: Optional[Dict] = None,
) -> Dict[str, torch.Tensor]:
"""
        Make a forward pass with decoder logic for producing the entire target sequence.
"""
source_state, dialog_acts_state = self._encode(source_tokens, dialog_acts)
if target_tokens:
state = self._init_decoder_state(source_state, dialog_acts_state)
# The `_forward_loop` decodes the input sequence and
# computes the loss during training and validation.
output_dict = self._forward_loop(state, target_tokens)
else:
output_dict = {}
if not self.training:
state = self._init_decoder_state(source_state, dialog_acts_state)
predictions = self._forward_beam_search(state)
output_dict.update(predictions)
if target_tokens and self._bleu:
# shape: (batch_size, beam_size, max_sequence_length)
top_k_predictions = output_dict["predictions"]
# shape: (batch_size, max_predicted_sequence_length)
best_predictions = top_k_predictions[:, 0, :]
self._bleu(best_predictions, target_tokens["tokens"])
return output_dict
@overrides
def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""
Finalize predictions.
"""
predicted_indices = output_dict["predictions"]
if not isinstance(predicted_indices, numpy.ndarray):
predicted_indices = predicted_indices.detach().cpu().numpy()
all_predicted_tokens = []
for indices in predicted_indices:
# Beam search gives us the top k results for each source sentence
# in the batch but we just want the single best.
if len(indices.shape) > 1:
indices = indices[0]
indices = list(indices)
# Collect indices till the first end_symbol
if self._end_index in indices:
indices = indices[: indices.index(self._end_index)]
predicted_tokens = [
self.vocab.get_token_from_index(x, namespace=self._target_namespace)
for x in indices
]
all_predicted_tokens.append(predicted_tokens)
output_dict["predicted_tokens"] = all_predicted_tokens
return output_dict
def _encode(
self, source_tokens: Dict[str, torch.Tensor], dialog_acts: torch.Tensor = None
) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]:
# Encode source tokens
source_state = self._encode_source_tokens(source_tokens)
# Encode dialog acts
if self._dialog_acts_encoder:
dialog_acts_state = self._encode_dialog_acts(dialog_acts)
else:
dialog_acts_state = None
return (source_state, dialog_acts_state)
def _encode_source_tokens(
self, source_tokens: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
# shape: (batch_size, max_input_sequence_length, encoder_input_dim)
embedded_input = self._source_embedder(source_tokens)
# shape: (batch_size, max_input_sequence_length)
source_mask = util.get_text_field_mask(source_tokens)
# shape: (batch_size, max_input_sequence_length, encoder_output_dim)
encoder_outputs = self._source_encoder(embedded_input, source_mask)
return {"source_mask": source_mask, "encoder_outputs": encoder_outputs}
def _encode_dialog_acts(self, dialog_acts: torch.Tensor) -> torch.Tensor:
# shape: (batch_size, dialog_acts_embeddings_size)
embedded_dialog_acts = self._dialog_acts_embedder(dialog_acts)
# shape: (batch_size, dim_encoder)
dialog_acts_state = self._dialog_acts_encoder(embedded_dialog_acts)
return dialog_acts_state
def _init_decoder_state(
self,
source_state: Dict[str, torch.Tensor],
dialog_acts_state: torch.Tensor = None,
) -> Dict[str, torch.Tensor]:
batch_size = source_state["source_mask"].size(0)
# shape: (batch_size, encoder_output_dim)
final_encoder_output = util.get_final_encoder_states(
source_state["encoder_outputs"],
source_state["source_mask"],
self._source_encoder.is_bidirectional(),
)
# Condition the source tokens state with dialog acts state
if self._dialog_acts_encoder:
final_encoder_output = self._merge_encoder(
torch.cat([final_encoder_output, dialog_acts_state], dim=1)
)
# Initialize the decoder hidden state with the final output of the encoder.
# shape: (batch_size, decoder_output_dim)
source_state["decoder_hidden"] = final_encoder_output
# shape: (batch_size, decoder_output_dim)
source_state["decoder_context"] = source_state["encoder_outputs"].new_zeros(
batch_size, self._decoder_output_dim
)
return source_state
def _forward_loop(
self,
state: Dict[str, torch.Tensor],
target_tokens: Dict[str, torch.LongTensor] = None,
) -> Dict[str, torch.Tensor]:
"""
Make forward pass during training or do greedy search during prediction.
Notes
-----
We really only use the predictions from the method to test that beam search
with a beam size of 1 gives the same results.
"""
# shape: (batch_size, max_input_sequence_length)
source_mask = state["source_mask"]
batch_size = source_mask.size()[0]
if target_tokens:
# shape: (batch_size, max_target_sequence_length)
targets = target_tokens["tokens"]
_, target_sequence_length = targets.size()
# The last input from the target is either padding or the end symbol.
# Either way, we don't have to process it.
num_decoding_steps = target_sequence_length - 1
else:
num_decoding_steps = self._max_decoding_steps
# Initialize target predictions with the start index.
# shape: (batch_size,)
last_predictions = source_mask.new_full(
(batch_size,), fill_value=self._start_index
)
step_logits: List[torch.Tensor] = []
step_predictions: List[torch.Tensor] = []
for timestep in range(num_decoding_steps):
if self.training and torch.rand(1).item() < self._scheduled_sampling_ratio:
                # Scheduled sampling: with probability
                # _scheduled_sampling_ratio, feed back the model's own
                # previous prediction instead of the gold token.
# shape: (batch_size,)
input_choices = last_predictions
elif not target_tokens:
# shape: (batch_size,)
input_choices = last_predictions
else:
# shape: (batch_size,)
input_choices = targets[:, timestep]
# shape: (batch_size, num_classes)
output_projections, state = self._prepare_output_projections(
input_choices, state
)
# list of tensors, shape: (batch_size, 1, num_classes)
step_logits.append(output_projections.unsqueeze(1))
# shape: (batch_size, num_classes)
class_probabilities = F.softmax(output_projections, dim=-1)
# shape (predicted_classes): (batch_size,)
_, predicted_classes = torch.max(class_probabilities, 1)
# shape (predicted_classes): (batch_size,)
last_predictions = predicted_classes
step_predictions.append(last_predictions.unsqueeze(1))
# shape: (batch_size, num_decoding_steps)
predictions = torch.cat(step_predictions, 1)
output_dict = {"predictions": predictions}
if target_tokens:
# shape: (batch_size, num_decoding_steps, num_classes)
logits = torch.cat(step_logits, 1)
# Compute loss.
target_mask = util.get_text_field_mask(target_tokens)
loss = self._get_loss(logits, targets, target_mask)
output_dict["loss"] = loss
return output_dict
def _forward_beam_search(
self, state: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
"""Make forward pass during prediction using a beam search."""
batch_size = state["source_mask"].size()[0]
start_predictions = state["source_mask"].new_full(
(batch_size,), fill_value=self._start_index
)
# shape (all_top_k_predictions): (batch_size, beam_size, num_decoding_steps)
# shape (log_probabilities): (batch_size, beam_size)
all_top_k_predictions, log_probabilities = self._beam_search.search(
start_predictions, state, self.take_step
)
output_dict = {
"class_log_probabilities": log_probabilities,
"predictions": all_top_k_predictions,
}
return output_dict
def _prepare_output_projections(
self, last_predictions: torch.Tensor, state: Dict[str, torch.Tensor]
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
"""
        Decode current state and last prediction to produce projections
into the target space, which can then be used to get probabilities of
each target token for the next step.
Inputs are the same as for `take_step()`.
"""
# shape: (group_size, max_input_sequence_length, encoder_output_dim)
encoder_outputs = state["encoder_outputs"]
# shape: (group_size, max_input_sequence_length)
source_mask = state["source_mask"]
# shape: (group_size, decoder_output_dim)
decoder_hidden = state["decoder_hidden"]
# shape: (group_size, decoder_output_dim)
decoder_context = state["decoder_context"]
# shape: (group_size, target_embedding_dim)
embedded_input = self._target_embedder(last_predictions)
if self._attention:
# shape: (group_size, encoder_output_dim)
attended_input = self._prepare_attended_input(
decoder_hidden, encoder_outputs, source_mask
)
# shape: (group_size, decoder_output_dim + target_embedding_dim)
decoder_input = torch.cat((attended_input, embedded_input), -1)
else:
# shape: (group_size, target_embedding_dim)
decoder_input = embedded_input
# shape (decoder_hidden): (batch_size, decoder_output_dim)
# shape (decoder_context): (batch_size, decoder_output_dim)
decoder_hidden, decoder_context = self._decoder_cell(
decoder_input, (decoder_hidden, decoder_context)
)
state["decoder_hidden"] = decoder_hidden
state["decoder_context"] = decoder_context
# shape: (group_size, num_classes)
output_projections = self._output_projection_layer(decoder_hidden)
return output_projections, state
def _prepare_attended_input(
self,
decoder_hidden_state: torch.LongTensor = None,
encoder_outputs: torch.LongTensor = None,
encoder_outputs_mask: torch.LongTensor = None,
) -> torch.Tensor:
"""Apply attention over encoder outputs and decoder state."""
        # Ensure the mask is also a FloatTensor; otherwise the
        # multiplication inside the attention module will complain.
# shape: (batch_size, max_input_sequence_length)
encoder_outputs_mask = encoder_outputs_mask.float()
# shape: (batch_size, max_input_sequence_length)
input_weights = self._attention(
decoder_hidden_state, encoder_outputs, encoder_outputs_mask
)
# shape: (batch_size, encoder_output_dim)
attended_input = util.weighted_sum(encoder_outputs, input_weights)
return attended_input
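    # A small illustration (not part of the original model) of what the
    # weighted sum above computes: a convex combination of encoder outputs,
    #   attended[b] = sum_t input_weights[b, t] * encoder_outputs[b, t, :]
    # which is equivalent to (weights.unsqueeze(-1) * outputs).sum(dim=1).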
@staticmethod
def _get_loss(
logits: torch.LongTensor,
targets: torch.LongTensor,
target_mask: torch.LongTensor,
) -> torch.Tensor:
# shape: (batch_size, num_decoding_steps)
relevant_targets = targets[:, 1:].contiguous()
# shape: (batch_size, num_decoding_steps)
relevant_mask = target_mask[:, 1:].contiguous()
return util.sequence_cross_entropy_with_logits(
logits, relevant_targets, relevant_mask
)
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
all_metrics: Dict[str, float] = {}
if self._bleu and not self.training:
all_metrics.update(self._bleu.get_metric(reset=reset))
return all_metrics
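# A minimal, self-contained sketch (not part of the model above) of the
# scheduled-sampling branch in _forward_loop(): during training, with
# probability `ratio` the decoder consumes its own previous prediction
# instead of the gold token.
def _choose_decoder_input_example(gold: torch.Tensor,
                                  last_predictions: torch.Tensor,
                                  ratio: float,
                                  training: bool) -> torch.Tensor:
    # Hypothetical helper name; mirrors the branch in _forward_loop and is
    # not called by the model itself.
    if training and torch.rand(1).item() < ratio:
        return last_predictions
    return gold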
|
curiosity-main
|
curiosity/paraphrase_models.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
from curiosity.reader import CuriosityDialogReader, USER, ASSISTANT
def test_text_to_instance():
facts_0 = [
{"fid": 1, "used": True},
{"fid": 1, "used": False},
{"fid": 1, "used": False},
]
facts_1 = [
{"fid": 1, "used": False},
{"fid": 1, "used": False},
{"fid": 1, "used": False},
]
facts_2 = [
{"fid": 1, "used": False},
{"fid": 1, "used": True},
{"fid": 1, "used": True},
]
messages = [
{"sender": USER, "message": "first text", "liked": False},
{
"sender": ASSISTANT,
"message": "second text",
"liked": True,
"facts": facts_0,
},
{"sender": USER, "message": "third text", "liked": False},
{
"sender": ASSISTANT,
"message": "fourth text",
"liked": True,
"facts": facts_1,
},
{"sender": USER, "message": "fifth text", "liked": False},
{
"sender": ASSISTANT,
"message": "sixth text",
"liked": False,
"facts": facts_2,
},
]
dialog = {"messages": messages, "dialog_id": 0}
instance = CuriosityDialogReader().text_to_instance(dialog, ignore_fact=True)
like_labels = [l.label for l in instance["likes"]]
assert like_labels == [
"not_liked",
"liked",
"not_liked",
"liked",
"not_liked",
"not_liked",
]
fact_labels = instance["fact_labels"]
# Users have 1 dummy fact
assert len(fact_labels[0].array) == 1
assert len(fact_labels[2].array) == 1
assert len(fact_labels[4].array) == 1
assert fact_labels[0].array[0] == 0
assert fact_labels[2].array[0] == 0
assert fact_labels[4].array[0] == 0
assert list(fact_labels[1].array) == [1, 0, 0]
assert list(fact_labels[3].array) == [0, 0, 0]
assert list(fact_labels[5].array) == [0, 1, 1]
|
curiosity-main
|
curiosity/tests/test_reader.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
import pytest
from curiosity.metrics import MeanReciprocalRank
def test_mrr():
logits = torch.tensor([1, 2, 0.5, 0, 4, 3]).reshape(1, 1, -1)
labels = torch.tensor([0, 1, 0, 0, 0, 1]).reshape(1, 1, -1)
mask = torch.tensor([1]).reshape(1, 1, -1)
metric = MeanReciprocalRank()
mrr = metric(logits, labels, mask)
# predicted order of documents
# preds: 4, 5, 1, 0, 2, 3
# True doc idxs: 1, 5
# +1 is to make first position/index correspond to rank 1
# Perfect score is 1
# MRR of true docs: 1 / (2 + 1) + 1 / (1 + 1) = 1 / 3 + 1 / 2
    # relevant ranks: 2 and 3
    assert mrr.item() == pytest.approx(1 / 3 + 1 / 2)
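# A brute-force cross-check (not part of the original test), assuming
# MeanReciprocalRank sums the reciprocal ranks of all relevant documents,
# as the assertion above suggests.
def _reference_mrr_example(scores, relevant):
    # Hypothetical helper name: rank documents by descending score, then
    # sum 1 / rank over the relevant indices.
    order = sorted(range(len(scores)), key=lambda i: scores[i], reverse=True)
    return sum(1.0 / (order.index(i) + 1) for i in relevant)
# e.g. _reference_mrr_example([1, 2, 0.5, 0, 4, 3], [1, 5]) == 1 / 3 + 1 / 2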
|
curiosity-main
|
curiosity/tests/test_metrics.py
|
curiosity-main
|
curiosity/tests/__init__.py
|
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import glob
import os
import shutil
from os import path
from typing import List
from setuptools import find_packages, setup
cwd = os.path.dirname(os.path.abspath(__file__))
version = "0.0.1"
try:
if not os.getenv("RELEASE"):
from datetime import date
today = date.today()
day = today.strftime("b%Y%m%d")
version += day
except Exception:
pass
requirements = [
"importlib",
"numpy",
"Pillow",
"mock",
"torch",
"pytorch-lightning==1.8.6",
"opencv-python",
"parameterized",
# Downgrade the protobuf package to 3.20.x or lower, related:
# https://developers.google.com/protocol-buffers/docs/news/2022-05-06#python-updates
# https://github.com/protocolbuffers/protobuf/issues/10051
"protobuf==3.20.2",
]
def d2go_gather_files(dst_module, file_path, extension="*") -> List[str]:
"""
    Return a list of files to include in the d2go submodule, and symlink
    (or, if symlinking fails, copy) the corresponding files into place.
"""
# Use absolute paths while symlinking.
source_configs_dir = path.join(path.dirname(path.realpath(__file__)), file_path)
destination = path.join(path.dirname(path.realpath(__file__)), "d2go", dst_module)
    # Symlink the config directory inside the package for a cleaner pip install.
# Remove stale symlink/directory from a previous build.
if path.exists(source_configs_dir):
if path.islink(destination):
os.unlink(destination)
elif path.isdir(destination):
shutil.rmtree(destination)
if not path.exists(destination):
try:
os.symlink(source_configs_dir, destination)
except OSError:
# Fall back to copying if symlink fails: ex. on Windows.
shutil.copytree(source_configs_dir, destination)
    config_paths = glob.glob(os.path.join(file_path, extension), recursive=True)
return config_paths
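# Example usage (illustrative paths only), mirroring the package_data
# entries in setup() below:
#   yaml_configs = d2go_gather_files("configs", "configs", "**/*.yaml")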
if __name__ == "__main__":
setup(
name="d2go",
version=version,
author="Mobile Vision",
url="https://github.com/facebookresearch/d2go",
description="D2Go",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
license="Apache-2.0",
install_requires=requirements,
packages=find_packages(exclude=["tools", "tests"]),
package_data={
"d2go": [
"LICENSE",
],
"d2go.configs": d2go_gather_files("configs", "configs", "**/*.yaml"),
"d2go.tools": d2go_gather_files("tools", "tools", "**/*.py"),
"d2go.tests": d2go_gather_files("tests", "tests", "**/*helper.py"),
},
entry_points={
"console_scripts": [
"d2go.exporter = d2go.tools.exporter:cli",
"d2go.train_net = d2go.tools.train_net:cli",
]
},
)
|
d2go-main
|
setup.py
|