code
stringlengths
26
870k
docstring
stringlengths
1
65.6k
func_name
stringlengths
1
194
language
stringclasses
1 value
repo
stringlengths
8
68
path
stringlengths
5
194
url
stringlengths
46
254
license
stringclasses
4 values
def sample(self, batch_size): """Returns a dict {key: array(batch_size x shapes[key])} """ buffers = {} with self.lock: assert self.current_size > 0 for key in self.buffers.keys(): buffers[key] = self.buffers[key][:self.current_size] buffers['o_2'] = buffers['o'][:, 1:, :] buffers['ag_2'] = buffers['ag'][:, 1:, :] transitions = self.sample_transitions(buffers, batch_size) for key in (['r', 'o_2', 'ag_2'] + list(self.buffers.keys())): assert key in transitions, "key %s missing from transitions" % key return transitions
Returns a dict {key: array(batch_size x shapes[key])}
sample
python
openai/baselines
baselines/her/replay_buffer.py
https://github.com/openai/baselines/blob/master/baselines/her/replay_buffer.py
MIT
def store_episode(self, episode_batch): """episode_batch: array(batch_size x (T or T+1) x dim_key) """ batch_sizes = [len(episode_batch[key]) for key in episode_batch.keys()] assert np.all(np.array(batch_sizes) == batch_sizes[0]) batch_size = batch_sizes[0] with self.lock: idxs = self._get_storage_idx(batch_size) # load inputs into buffers for key in self.buffers.keys(): self.buffers[key][idxs] = episode_batch[key] self.n_transitions_stored += batch_size * self.T
episode_batch: array(batch_size x (T or T+1) x dim_key)
store_episode
python
openai/baselines
baselines/her/replay_buffer.py
https://github.com/openai/baselines/blob/master/baselines/her/replay_buffer.py
MIT
def store_args(method): """Stores provided method args as instance attributes. """ argspec = inspect.getfullargspec(method) defaults = {} if argspec.defaults is not None: defaults = dict( zip(argspec.args[-len(argspec.defaults):], argspec.defaults)) if argspec.kwonlydefaults is not None: defaults.update(argspec.kwonlydefaults) arg_names = argspec.args[1:] @functools.wraps(method) def wrapper(*positional_args, **keyword_args): self = positional_args[0] # Get default arg values args = defaults.copy() # Add provided arg values for name, value in zip(arg_names, positional_args[1:]): args[name] = value args.update(keyword_args) self.__dict__.update(args) return method(*positional_args, **keyword_args) return wrapper
Stores provided method args as instance attributes.
store_args
python
openai/baselines
baselines/her/util.py
https://github.com/openai/baselines/blob/master/baselines/her/util.py
MIT
def import_function(spec): """Import a function identified by a string like "pkg.module:fn_name". """ mod_name, fn_name = spec.split(':') module = importlib.import_module(mod_name) fn = getattr(module, fn_name) return fn
Import a function identified by a string like "pkg.module:fn_name".
import_function
python
openai/baselines
baselines/her/util.py
https://github.com/openai/baselines/blob/master/baselines/her/util.py
MIT
def flatten_grads(var_list, grads): """Flattens a variables and their gradients. """ return tf.concat([tf.reshape(grad, [U.numel(v)]) for (v, grad) in zip(var_list, grads)], 0)
Flattens a variables and their gradients.
flatten_grads
python
openai/baselines
baselines/her/util.py
https://github.com/openai/baselines/blob/master/baselines/her/util.py
MIT
def nn(input, layers_sizes, reuse=None, flatten=False, name=""): """Creates a simple neural network """ for i, size in enumerate(layers_sizes): activation = tf.nn.relu if i < len(layers_sizes) - 1 else None input = tf.layers.dense(inputs=input, units=size, kernel_initializer=tf.contrib.layers.xavier_initializer(), reuse=reuse, name=name + '_' + str(i)) if activation: input = activation(input) if flatten: assert layers_sizes[-1] == 1 input = tf.reshape(input, [-1]) return input
Creates a simple neural network
nn
python
openai/baselines
baselines/her/util.py
https://github.com/openai/baselines/blob/master/baselines/her/util.py
MIT
def mpi_fork(n, extra_mpi_args=[]): """Re-launches the current script with workers Returns "parent" for original parent, "child" for MPI children """ if n <= 1: return "child" if os.getenv("IN_MPI") is None: env = os.environ.copy() env.update( MKL_NUM_THREADS="1", OMP_NUM_THREADS="1", IN_MPI="1" ) # "-bind-to core" is crucial for good performance args = ["mpirun", "-np", str(n)] + \ extra_mpi_args + \ [sys.executable] args += sys.argv subprocess.check_call(args, env=env) return "parent" else: install_mpi_excepthook() return "child"
Re-launches the current script with workers Returns "parent" for original parent, "child" for MPI children
mpi_fork
python
openai/baselines
baselines/her/util.py
https://github.com/openai/baselines/blob/master/baselines/her/util.py
MIT
def convert_episode_to_batch_major(episode): """Converts an episode to have the batch dimension in the major (first) dimension. """ episode_batch = {} for key in episode.keys(): val = np.array(episode[key]).copy() # make inputs batch-major instead of time-major episode_batch[key] = val.swapaxes(0, 1) return episode_batch
Converts an episode to have the batch dimension in the major (first) dimension.
convert_episode_to_batch_major
python
openai/baselines
baselines/her/util.py
https://github.com/openai/baselines/blob/master/baselines/her/util.py
MIT
def transitions_in_episode_batch(episode_batch): """Number of transitions in a given episode batch. """ shape = episode_batch['u'].shape return shape[0] * shape[1]
Number of transitions in a given episode batch.
transitions_in_episode_batch
python
openai/baselines
baselines/her/util.py
https://github.com/openai/baselines/blob/master/baselines/her/util.py
MIT
def reshape_for_broadcasting(source, target): """Reshapes a tensor (source) to have the correct shape and dtype of the target before broadcasting it with MPI. """ dim = len(target.get_shape()) shape = ([1] * (dim - 1)) + [-1] return tf.reshape(tf.cast(source, target.dtype), shape)
Reshapes a tensor (source) to have the correct shape and dtype of the target before broadcasting it with MPI.
reshape_for_broadcasting
python
openai/baselines
baselines/her/util.py
https://github.com/openai/baselines/blob/master/baselines/her/util.py
MIT
def cached_make_env(make_env): """ Only creates a new environment from the provided function if one has not yet already been created. This is useful here because we need to infer certain properties of the env, e.g. its observation and action spaces, without any intend of actually using it. """ if make_env not in CACHED_ENVS: env = make_env() CACHED_ENVS[make_env] = env return CACHED_ENVS[make_env]
Only creates a new environment from the provided function if one has not yet already been created. This is useful here because we need to infer certain properties of the env, e.g. its observation and action spaces, without any intend of actually using it.
cached_make_env
python
openai/baselines
baselines/her/experiment/config.py
https://github.com/openai/baselines/blob/master/baselines/her/experiment/config.py
MIT
def naughty_strings(filepath=FILEPATH): """Get the list of naughty_strings. By default this will get the strings from the blns.txt file Code is a simple port of what is already in the /scripts directory :param filepath: Optional filepath the the blns.txt file :returns: The list of naughty strings """ strings = [] with open(filepath, 'r') as f: # put all lines in the file into a Python list strings = f.readlines() # above line leaves trailing newline characters; strip them out strings = [x.strip(u'\n') for x in strings] # remove empty-lines and comments strings = [x for x in strings if x and not x.startswith(u'#')] # insert empty string since all are being removed strings.insert(0, u"") return strings
Get the list of naughty_strings. By default this will get the strings from the blns.txt file Code is a simple port of what is already in the /scripts directory :param filepath: Optional filepath the the blns.txt file :returns: The list of naughty strings
naughty_strings
python
minimaxir/big-list-of-naughty-strings
naughtystrings/__init__.py
https://github.com/minimaxir/big-list-of-naughty-strings/blob/master/naughtystrings/__init__.py
MIT
def append_dims(x, target_dims): """Appends dimensions to the end of a tensor until it has target_dims dimensions.""" dims_to_append = target_dims - x.ndim if dims_to_append < 0: raise ValueError(f"input has {x.ndim} dims but target_dims is {target_dims}, which is less") return x[(...,) + (None,) * dims_to_append]
Appends dimensions to the end of a tensor until it has target_dims dimensions.
append_dims
python
instantX-research/InstantIR
train_previewer_lora.py
https://github.com/instantX-research/InstantIR/blob/master/train_previewer_lora.py
Apache-2.0
def tensor_to_pil(images): """ Convert image tensor or a batch of image tensors to PIL image(s). """ images = images.clamp(0, 1) images_np = images.detach().cpu().numpy() if images_np.ndim == 4: images_np = np.transpose(images_np, (0, 2, 3, 1)) elif images_np.ndim == 3: images_np = np.transpose(images_np, (1, 2, 0)) images_np = images_np[None, ...] images_np = (images_np * 255).round().astype("uint8") if images_np.shape[-1] == 1: # special case for grayscale (single channel) images pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images_np] else: pil_images = [Image.fromarray(image[:, :, :3]) for image in images_np] return pil_images
Convert image tensor or a batch of image tensors to PIL image(s).
tensor_to_pil
python
instantX-research/InstantIR
infer.py
https://github.com/instantX-research/InstantIR/blob/master/infer.py
Apache-2.0
def calc_mean_std(feat, eps=1e-5): """Calculate mean and std for adaptive_instance_normalization. Args: feat (Tensor): 4D tensor. eps (float): A small value added to the variance to avoid divide-by-zero. Default: 1e-5. """ size = feat.size() assert len(size) == 4, 'The input feature should be 4D tensor.' b, c = size[:2] feat_var = feat.view(b, c, -1).var(dim=2) + eps feat_std = feat_var.sqrt().view(b, c, 1, 1) feat_mean = feat.view(b, c, -1).mean(dim=2).view(b, c, 1, 1) return feat_mean, feat_std
Calculate mean and std for adaptive_instance_normalization. Args: feat (Tensor): 4D tensor. eps (float): A small value added to the variance to avoid divide-by-zero. Default: 1e-5.
calc_mean_std
python
instantX-research/InstantIR
infer.py
https://github.com/instantX-research/InstantIR/blob/master/infer.py
Apache-2.0
def fourier_filter(x_in: "torch.Tensor", threshold: int, scale: int) -> "torch.Tensor": """Fourier filter as introduced in FreeU (https://arxiv.org/abs/2309.11497). This version of the method comes from here: https://github.com/huggingface/diffusers/pull/5164#issuecomment-1732638706 """ x = x_in B, C, H, W = x.shape # Non-power of 2 images must be float32 if (W & (W - 1)) != 0 or (H & (H - 1)) != 0: x = x.to(dtype=torch.float32) # FFT x_freq = fftn(x, dim=(-2, -1)) x_freq = fftshift(x_freq, dim=(-2, -1)) B, C, H, W = x_freq.shape mask = torch.ones((B, C, H, W), device=x.device) crow, ccol = H // 2, W // 2 mask[..., crow - threshold : crow + threshold, ccol - threshold : ccol + threshold] = scale x_freq = x_freq * mask # IFFT x_freq = ifftshift(x_freq, dim=(-2, -1)) x_filtered = ifftn(x_freq, dim=(-2, -1)).real return x_filtered.to(dtype=x_in.dtype)
Fourier filter as introduced in FreeU (https://arxiv.org/abs/2309.11497). This version of the method comes from here: https://github.com/huggingface/diffusers/pull/5164#issuecomment-1732638706
fourier_filter
python
instantX-research/InstantIR
module/min_sdxl.py
https://github.com/instantX-research/InstantIR/blob/master/module/min_sdxl.py
Apache-2.0
def apply_freeu( resolution_idx: int, hidden_states: "torch.Tensor", res_hidden_states: "torch.Tensor", **freeu_kwargs): """Applies the FreeU mechanism as introduced in https: //arxiv.org/abs/2309.11497. Adapted from the official code repository: https://github.com/ChenyangSi/FreeU. Args: resolution_idx (`int`): Integer denoting the UNet block where FreeU is being applied. hidden_states (`torch.Tensor`): Inputs to the underlying block. res_hidden_states (`torch.Tensor`): Features from the skip block corresponding to the underlying block. s1 (`float`): Scaling factor for stage 1 to attenuate the contributions of the skip features. s2 (`float`): Scaling factor for stage 2 to attenuate the contributions of the skip features. b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features. b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features. """ if resolution_idx == 0: num_half_channels = hidden_states.shape[1] // 2 hidden_states[:, :num_half_channels] = hidden_states[:, :num_half_channels] * freeu_kwargs["b1"] res_hidden_states = fourier_filter(res_hidden_states, threshold=1, scale=freeu_kwargs["s1"]) if resolution_idx == 1: num_half_channels = hidden_states.shape[1] // 2 hidden_states[:, :num_half_channels] = hidden_states[:, :num_half_channels] * freeu_kwargs["b2"] res_hidden_states = fourier_filter(res_hidden_states, threshold=1, scale=freeu_kwargs["s2"]) return hidden_states, res_hidden_states
Applies the FreeU mechanism as introduced in https: //arxiv.org/abs/2309.11497. Adapted from the official code repository: https://github.com/ChenyangSi/FreeU. Args: resolution_idx (`int`): Integer denoting the UNet block where FreeU is being applied. hidden_states (`torch.Tensor`): Inputs to the underlying block. res_hidden_states (`torch.Tensor`): Features from the skip block corresponding to the underlying block. s1 (`float`): Scaling factor for stage 1 to attenuate the contributions of the skip features. s2 (`float`): Scaling factor for stage 2 to attenuate the contributions of the skip features. b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features. b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
apply_freeu
python
instantX-research/InstantIR
module/min_sdxl.py
https://github.com/instantX-research/InstantIR/blob/master/module/min_sdxl.py
Apache-2.0
def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. """ if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnAddedKVProcessor() elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnProcessor() else: raise ValueError( f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" ) self.set_attn_processor(processor)
Disables custom attention processors and sets the default attention implementation.
set_default_attn_processor
python
instantX-research/InstantIR
module/aggregator.py
https://github.com/instantX-research/InstantIR/blob/master/module/aggregator.py
Apache-2.0
def forward( self, sample: torch.FloatTensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, controlnet_cond: torch.FloatTensor, cat_dim: int = -2, conditioning_scale: float = 1.0, class_labels: Optional[torch.Tensor] = None, timestep_cond: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, return_dict: bool = True, ) -> Union[AggregatorOutput, Tuple[Tuple[torch.FloatTensor, ...], torch.FloatTensor]]: """ The [`Aggregator`] forward method. Args: sample (`torch.FloatTensor`): The noisy input tensor. timestep (`Union[torch.Tensor, float, int]`): The number of timesteps to denoise an input. encoder_hidden_states (`torch.Tensor`): The encoder hidden states. controlnet_cond (`torch.FloatTensor`): The conditional input tensor of shape `(batch_size, sequence_length, hidden_size)`. conditioning_scale (`float`, defaults to `1.0`): The scale factor for ControlNet outputs. class_labels (`torch.Tensor`, *optional*, defaults to `None`): Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings. timestep_cond (`torch.Tensor`, *optional*, defaults to `None`): Additional conditional embeddings for timestep. If provided, the embeddings will be summed with the timestep_embedding passed through the `self.time_embedding` layer to obtain the final timestep embeddings. attention_mask (`torch.Tensor`, *optional*, defaults to `None`): An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large negative values to the attention scores corresponding to "discard" tokens. added_cond_kwargs (`dict`): Additional conditions for the Stable Diffusion XL UNet. cross_attention_kwargs (`dict[str]`, *optional*, defaults to `None`): A kwargs dictionary that if specified is passed along to the `AttnProcessor`. return_dict (`bool`, defaults to `True`): Whether or not to return a [`~models.controlnet.ControlNetOutput`] instead of a plain tuple. Returns: [`~models.controlnet.ControlNetOutput`] **or** `tuple`: If `return_dict` is `True`, a [`~models.controlnet.ControlNetOutput`] is returned, otherwise a tuple is returned where the first element is the sample tensor. """ # check channel order channel_order = self.config.controlnet_conditioning_channel_order if channel_order == "rgb": # in rgb order by default ... else: raise ValueError(f"unknown `controlnet_conditioning_channel_order`: {channel_order}") # prepare attention_mask if attention_mask is not None: attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 attention_mask = attention_mask.unsqueeze(1) # 1. time timesteps = timestep if not torch.is_tensor(timesteps): # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) is_mps = sample.device.type == "mps" if isinstance(timestep, float): dtype = torch.float32 if is_mps else torch.float64 else: dtype = torch.int32 if is_mps else torch.int64 timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) elif len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timesteps = timesteps.expand(sample.shape[0]) t_emb = self.time_proj(timesteps) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might actually be running in fp16. so we need to cast here. # there might be better ways to encapsulate this. t_emb = t_emb.to(dtype=sample.dtype) emb = self.time_embedding(t_emb, timestep_cond) aug_emb = None if self.class_embedding is not None: if class_labels is None: raise ValueError("class_labels should be provided when num_class_embeds > 0") if self.config.class_embed_type == "timestep": class_labels = self.time_proj(class_labels) class_emb = self.class_embedding(class_labels).to(dtype=self.dtype) emb = emb + class_emb if self.config.addition_embed_type is not None: if self.config.addition_embed_type == "text": aug_emb = self.add_embedding(encoder_hidden_states) elif self.config.addition_embed_type == "text_time": if "text_embeds" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`" ) text_embeds = added_cond_kwargs.get("text_embeds") if "time_ids" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`" ) time_ids = added_cond_kwargs.get("time_ids") time_embeds = self.add_time_proj(time_ids.flatten()) time_embeds = time_embeds.reshape((text_embeds.shape[0], -1)) add_embeds = torch.concat([text_embeds, time_embeds], dim=-1) add_embeds = add_embeds.to(emb.dtype) aug_emb = self.add_embedding(add_embeds) emb = emb + aug_emb if aug_emb is not None else emb encoder_hidden_states = self.process_encoder_hidden_states( encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs ) # 2. prepare input cond_latent = self.conv_in(sample) ref_latent = self.ref_conv_in(controlnet_cond) batch_size, channel, height, width = cond_latent.shape if self.pad_concat: if cat_dim == -2 or cat_dim == 2: concat_pad = torch.zeros(batch_size, channel, 1, width) elif cat_dim == -1 or cat_dim == 3: concat_pad = torch.zeros(batch_size, channel, height, 1) else: raise ValueError(f"Aggregator shall concat along spatial dimension, but is asked to concat dim: {cat_dim}.") concat_pad = concat_pad.to(cond_latent.device, dtype=cond_latent.dtype) sample = torch.cat([cond_latent, concat_pad, ref_latent], dim=cat_dim) else: sample = torch.cat([cond_latent, ref_latent], dim=cat_dim) # 3. 
down down_block_res_samples = (sample,) for downsample_block in self.down_blocks: sample, res_samples = downsample_block( hidden_states=sample, temb=emb, cross_attention_kwargs=cross_attention_kwargs, ) # rebuild sample: split and concat if self.pad_concat: batch_size, channel, height, width = sample.shape if cat_dim == -2 or cat_dim == 2: cond_latent = sample[:, :, :height//2, :] ref_latent = sample[:, :, -(height//2):, :] concat_pad = torch.zeros(batch_size, channel, 1, width) elif cat_dim == -1 or cat_dim == 3: cond_latent = sample[:, :, :, :width//2] ref_latent = sample[:, :, :, -(width//2):] concat_pad = torch.zeros(batch_size, channel, height, 1) concat_pad = concat_pad.to(cond_latent.device, dtype=cond_latent.dtype) sample = torch.cat([cond_latent, concat_pad, ref_latent], dim=cat_dim) res_samples = res_samples[:-1] + (sample,) down_block_res_samples += res_samples # 4. mid if self.mid_block is not None: sample = self.mid_block( sample, emb, cross_attention_kwargs=cross_attention_kwargs, ) # 5. split samples and SFT. controlnet_down_block_res_samples = () for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks): batch_size, channel, height, width = down_block_res_sample.shape if cat_dim == -2 or cat_dim == 2: cond_latent = down_block_res_sample[:, :, :height//2, :] ref_latent = down_block_res_sample[:, :, -(height//2):, :] elif cat_dim == -1 or cat_dim == 3: cond_latent = down_block_res_sample[:, :, :, :width//2] ref_latent = down_block_res_sample[:, :, :, -(width//2):] down_block_res_sample = controlnet_block((cond_latent, ref_latent), ) controlnet_down_block_res_samples = controlnet_down_block_res_samples + (down_block_res_sample,) down_block_res_samples = controlnet_down_block_res_samples batch_size, channel, height, width = sample.shape if cat_dim == -2 or cat_dim == 2: cond_latent = sample[:, :, :height//2, :] ref_latent = sample[:, :, -(height//2):, :] elif cat_dim == -1 or cat_dim == 3: cond_latent = sample[:, :, :, :width//2] ref_latent = sample[:, :, :, -(width//2):] mid_block_res_sample = self.controlnet_mid_block((cond_latent, ref_latent), ) # 6. scaling down_block_res_samples = [sample*conditioning_scale for sample in down_block_res_samples] mid_block_res_sample = mid_block_res_sample*conditioning_scale if self.config.global_pool_conditions: down_block_res_samples = [ torch.mean(sample, dim=(2, 3), keepdim=True) for sample in down_block_res_samples ] mid_block_res_sample = torch.mean(mid_block_res_sample, dim=(2, 3), keepdim=True) if not return_dict: return (down_block_res_samples, mid_block_res_sample) return AggregatorOutput( down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample )
The [`Aggregator`] forward method. Args: sample (`torch.FloatTensor`): The noisy input tensor. timestep (`Union[torch.Tensor, float, int]`): The number of timesteps to denoise an input. encoder_hidden_states (`torch.Tensor`): The encoder hidden states. controlnet_cond (`torch.FloatTensor`): The conditional input tensor of shape `(batch_size, sequence_length, hidden_size)`. conditioning_scale (`float`, defaults to `1.0`): The scale factor for ControlNet outputs. class_labels (`torch.Tensor`, *optional*, defaults to `None`): Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings. timestep_cond (`torch.Tensor`, *optional*, defaults to `None`): Additional conditional embeddings for timestep. If provided, the embeddings will be summed with the timestep_embedding passed through the `self.time_embedding` layer to obtain the final timestep embeddings. attention_mask (`torch.Tensor`, *optional*, defaults to `None`): An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large negative values to the attention scores corresponding to "discard" tokens. added_cond_kwargs (`dict`): Additional conditions for the Stable Diffusion XL UNet. cross_attention_kwargs (`dict[str]`, *optional*, defaults to `None`): A kwargs dictionary that if specified is passed along to the `AttnProcessor`. return_dict (`bool`, defaults to `True`): Whether or not to return a [`~models.controlnet.ControlNetOutput`] instead of a plain tuple. Returns: [`~models.controlnet.ControlNetOutput`] **or** `tuple`: If `return_dict` is `True`, a [`~models.controlnet.ControlNetOutput`] is returned, otherwise a tuple is returned where the first element is the sample tensor.
forward
python
instantX-research/InstantIR
module/aggregator.py
https://github.com/instantX-research/InstantIR/blob/master/module/aggregator.py
Apache-2.0
def forward(self, x, latents): """ Args: x (torch.Tensor): image features shape (b, n1, D) latent (torch.Tensor): latent features shape (b, n2, D) """ x = self.norm1(x) latents = self.norm2(latents) b, l, _ = latents.shape q = self.to_q(latents) kv_input = torch.cat((x, latents), dim=-2) k, v = self.to_kv(kv_input).chunk(2, dim=-1) q = reshape_tensor(q, self.heads) k = reshape_tensor(k, self.heads) v = reshape_tensor(v, self.heads) # attention scale = 1 / math.sqrt(math.sqrt(self.dim_head)) weight = (q * scale) @ (k * scale).transpose(-2, -1) # More stable with f16 than dividing afterwards weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype) out = weight @ v out = out.permute(0, 2, 1, 3).reshape(b, l, -1) return self.to_out(out)
Args: x (torch.Tensor): image features shape (b, n1, D) latent (torch.Tensor): latent features shape (b, n2, D)
forward
python
instantX-research/InstantIR
module/ip_adapter/resampler.py
https://github.com/instantX-research/InstantIR/blob/master/module/ip_adapter/resampler.py
Apache-2.0
def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. """ if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnAddedKVProcessor() elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnProcessor() else: raise ValueError( f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" ) self.set_attn_processor(processor)
Disables custom attention processors and sets the default attention implementation.
set_default_attn_processor
python
instantX-research/InstantIR
module/unet/unet_2d_ZeroSFT.py
https://github.com/instantX-research/InstantIR/blob/master/module/unet/unet_2d_ZeroSFT.py
Apache-2.0
def disable_freeu(self): """Disables the FreeU mechanism.""" freeu_keys = {"s1", "s2", "b1", "b2"} for i, upsample_block in enumerate(self.up_blocks): for k in freeu_keys: if hasattr(upsample_block, k) or getattr(upsample_block, k, None) is not None: setattr(upsample_block, k, None)
Disables the FreeU mechanism.
disable_freeu
python
instantX-research/InstantIR
module/unet/unet_2d_ZeroSFT.py
https://github.com/instantX-research/InstantIR/blob/master/module/unet/unet_2d_ZeroSFT.py
Apache-2.0
def fuse_qkv_projections(self): """ Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value) are fused. For cross-attention modules, key and value projection matrices are fused. <Tip warning={true}> This API is 🧪 experimental. </Tip> """ self.original_attn_processors = None for _, attn_processor in self.attn_processors.items(): if "Added" in str(attn_processor.__class__.__name__): raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.") self.original_attn_processors = self.attn_processors for module in self.modules(): if isinstance(module, Attention): module.fuse_projections(fuse=True)
Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value) are fused. For cross-attention modules, key and value projection matrices are fused. <Tip warning={true}> This API is 🧪 experimental. </Tip>
fuse_qkv_projections
python
instantX-research/InstantIR
module/unet/unet_2d_ZeroSFT.py
https://github.com/instantX-research/InstantIR/blob/master/module/unet/unet_2d_ZeroSFT.py
Apache-2.0
def unfuse_qkv_projections(self): """Disables the fused QKV projection if enabled. <Tip warning={true}> This API is 🧪 experimental. </Tip> """ if self.original_attn_processors is not None: self.set_attn_processor(self.original_attn_processors)
Disables the fused QKV projection if enabled. <Tip warning={true}> This API is 🧪 experimental. </Tip>
unfuse_qkv_projections
python
instantX-research/InstantIR
module/unet/unet_2d_ZeroSFT.py
https://github.com/instantX-research/InstantIR/blob/master/module/unet/unet_2d_ZeroSFT.py
Apache-2.0
def unload_lora(self): """Unloads LoRA weights.""" deprecate( "unload_lora", "0.28.0", "Calling `unload_lora()` is deprecated and will be removed in a future version. Please install `peft` and then call `disable_adapters().", ) for module in self.modules(): if hasattr(module, "set_lora_layer"): module.set_lora_layer(None)
Unloads LoRA weights.
unload_lora
python
instantX-research/InstantIR
module/unet/unet_2d_ZeroSFT.py
https://github.com/instantX-research/InstantIR/blob/master/module/unet/unet_2d_ZeroSFT.py
Apache-2.0
def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. """ if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnAddedKVProcessor() elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnProcessor() else: raise ValueError( f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" ) self.set_attn_processor(processor, _remove_lora=True)
Disables custom attention processors and sets the default attention implementation.
set_default_attn_processor
python
instantX-research/InstantIR
module/diffusers_vae/autoencoder_kl.py
https://github.com/instantX-research/InstantIR/blob/master/module/diffusers_vae/autoencoder_kl.py
Apache-2.0
def encode( self, x: torch.FloatTensor, return_dict: bool = True ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]: """ Encode a batch of images into latents. Args: x (`torch.FloatTensor`): Input batch of images. return_dict (`bool`, *optional*, defaults to `True`): Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple. Returns: The latent representations of the encoded images. If `return_dict` is True, a [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned. """ if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): return self.tiled_encode(x, return_dict=return_dict) if self.use_slicing and x.shape[0] > 1: encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)] h = torch.cat(encoded_slices) else: h = self.encoder(x) moments = self.quant_conv(h) posterior = DiagonalGaussianDistribution(moments) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=posterior)
Encode a batch of images into latents. Args: x (`torch.FloatTensor`): Input batch of images. return_dict (`bool`, *optional*, defaults to `True`): Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple. Returns: The latent representations of the encoded images. If `return_dict` is True, a [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned.
encode
python
instantX-research/InstantIR
module/diffusers_vae/autoencoder_kl.py
https://github.com/instantX-research/InstantIR/blob/master/module/diffusers_vae/autoencoder_kl.py
Apache-2.0
def decode( self, z: torch.FloatTensor, return_dict: bool = True, generator=None ) -> Union[DecoderOutput, torch.FloatTensor]: """ Decode a batch of images. Args: z (`torch.FloatTensor`): Input batch of latent vectors. return_dict (`bool`, *optional*, defaults to `True`): Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple. Returns: [`~models.vae.DecoderOutput`] or `tuple`: If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is returned. """ if self.use_slicing and z.shape[0] > 1: decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)] decoded = torch.cat(decoded_slices) else: decoded = self._decode(z).sample if not return_dict: return (decoded,) return DecoderOutput(sample=decoded)
Decode a batch of images. Args: z (`torch.FloatTensor`): Input batch of latent vectors. return_dict (`bool`, *optional*, defaults to `True`): Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple. Returns: [`~models.vae.DecoderOutput`] or `tuple`: If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is returned.
decode
python
instantX-research/InstantIR
module/diffusers_vae/autoencoder_kl.py
https://github.com/instantX-research/InstantIR/blob/master/module/diffusers_vae/autoencoder_kl.py
Apache-2.0
def fuse_qkv_projections(self): """ Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value) are fused. For cross-attention modules, key and value projection matrices are fused. <Tip warning={true}> This API is 🧪 experimental. </Tip> """ self.original_attn_processors = None for _, attn_processor in self.attn_processors.items(): if "Added" in str(attn_processor.__class__.__name__): raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.") self.original_attn_processors = self.attn_processors for module in self.modules(): if isinstance(module, Attention): module.fuse_projections(fuse=True)
Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value) are fused. For cross-attention modules, key and value projection matrices are fused. <Tip warning={true}> This API is 🧪 experimental. </Tip>
fuse_qkv_projections
python
instantX-research/InstantIR
module/diffusers_vae/autoencoder_kl.py
https://github.com/instantX-research/InstantIR/blob/master/module/diffusers_vae/autoencoder_kl.py
Apache-2.0
def unfuse_qkv_projections(self): """Disables the fused QKV projection if enabled. <Tip warning={true}> This API is 🧪 experimental. </Tip> """ if self.original_attn_processors is not None: self.set_attn_processor(self.original_attn_processors)
Disables the fused QKV projection if enabled. <Tip warning={true}> This API is 🧪 experimental. </Tip>
unfuse_qkv_projections
python
instantX-research/InstantIR
module/diffusers_vae/autoencoder_kl.py
https://github.com/instantX-research/InstantIR/blob/master/module/diffusers_vae/autoencoder_kl.py
Apache-2.0
def betas_for_alpha_bar( num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ): """ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of (1-beta) over time from t = [0,1]. Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up to that part of the diffusion process. Args: num_diffusion_timesteps (`int`): the number of betas to produce. max_beta (`float`): the maximum beta to use; use values lower than 1 to prevent singularities. alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. Choose from `cosine` or `exp` Returns: betas (`np.ndarray`): the betas used by the scheduler to step the model outputs """ if alpha_transform_type == "cosine": def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}") betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32)
Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of (1-beta) over time from t = [0,1]. Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up to that part of the diffusion process. Args: num_diffusion_timesteps (`int`): the number of betas to produce. max_beta (`float`): the maximum beta to use; use values lower than 1 to prevent singularities. alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. Choose from `cosine` or `exp` Returns: betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
betas_for_alpha_bar
python
instantX-research/InstantIR
schedulers/lcm_single_step_scheduler.py
https://github.com/instantX-research/InstantIR/blob/master/schedulers/lcm_single_step_scheduler.py
Apache-2.0
def rescale_zero_terminal_snr(betas: torch.FloatTensor) -> torch.FloatTensor: """ Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1) Args: betas (`torch.FloatTensor`): the betas that the scheduler is being initialized with. Returns: `torch.FloatTensor`: rescaled betas with zero terminal SNR """ # Convert betas to alphas_bar_sqrt alphas = 1.0 - betas alphas_cumprod = torch.cumprod(alphas, dim=0) alphas_bar_sqrt = alphas_cumprod.sqrt() # Store old values. alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() # Shift so the last timestep is zero. alphas_bar_sqrt -= alphas_bar_sqrt_T # Scale so the first timestep is back to the old value. alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) # Convert alphas_bar_sqrt to betas alphas_bar = alphas_bar_sqrt**2 # Revert sqrt alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod alphas = torch.cat([alphas_bar[0:1], alphas]) betas = 1 - alphas return betas
Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1) Args: betas (`torch.FloatTensor`): the betas that the scheduler is being initialized with. Returns: `torch.FloatTensor`: rescaled betas with zero terminal SNR
rescale_zero_terminal_snr
python
instantX-research/InstantIR
schedulers/lcm_single_step_scheduler.py
https://github.com/instantX-research/InstantIR/blob/master/schedulers/lcm_single_step_scheduler.py
Apache-2.0
def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor: """ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the current timestep. Args: sample (`torch.FloatTensor`): The input sample. timestep (`int`, *optional*): The current timestep in the diffusion chain. Returns: `torch.FloatTensor`: A scaled input sample. """ return sample
Ensures interchangeability with schedulers that need to scale the denoising model input depending on the current timestep. Args: sample (`torch.FloatTensor`): The input sample. timestep (`int`, *optional*): The current timestep in the diffusion chain. Returns: `torch.FloatTensor`: A scaled input sample.
scale_model_input
python
instantX-research/InstantIR
schedulers/lcm_single_step_scheduler.py
https://github.com/instantX-research/InstantIR/blob/master/schedulers/lcm_single_step_scheduler.py
Apache-2.0
def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor: """ "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing pixels from saturation at each step. We find that dynamic thresholding results in significantly better photorealism as well as better image-text alignment, especially when using very large guidance weights." https://arxiv.org/abs/2205.11487 """ dtype = sample.dtype batch_size, channels, *remaining_dims = sample.shape if dtype not in (torch.float32, torch.float64): sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half # Flatten sample for doing quantile calculation along each image sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) abs_sample = sample.abs() # "a certain percentile absolute pixel value" s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) s = torch.clamp( s, min=1, max=self.config.sample_max_value ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" sample = sample.reshape(batch_size, channels, *remaining_dims) sample = sample.to(dtype) return sample
"Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing pixels from saturation at each step. We find that dynamic thresholding results in significantly better photorealism as well as better image-text alignment, especially when using very large guidance weights." https://arxiv.org/abs/2205.11487
_threshold_sample
python
instantX-research/InstantIR
schedulers/lcm_single_step_scheduler.py
https://github.com/instantX-research/InstantIR/blob/master/schedulers/lcm_single_step_scheduler.py
Apache-2.0
def set_timesteps( self, num_inference_steps: int = None, device: Union[str, torch.device] = None, original_inference_steps: Optional[int] = None, strength: int = 1.0, timesteps: Optional[list] = None, ): """ Sets the discrete timesteps used for the diffusion chain (to be run before inference). Args: num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. original_inference_steps (`int`, *optional*): The original number of inference steps, which will be used to generate a linearly-spaced timestep schedule (which is different from the standard `diffusers` implementation). We will then take `num_inference_steps` timesteps from this schedule, evenly spaced in terms of indices, and use that as our final timestep schedule. If not set, this will default to the `original_inference_steps` attribute. """ if num_inference_steps is not None and timesteps is not None: raise ValueError("Can only pass one of `num_inference_steps` or `custom_timesteps`.") if timesteps is not None: for i in range(1, len(timesteps)): if timesteps[i] >= timesteps[i - 1]: raise ValueError("`custom_timesteps` must be in descending order.") if timesteps[0] >= self.config.num_train_timesteps: raise ValueError( f"`timesteps` must start before `self.config.train_timesteps`:" f" {self.config.num_train_timesteps}." ) timesteps = np.array(timesteps, dtype=np.int64) else: if num_inference_steps > self.config.num_train_timesteps: raise ValueError( f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:" f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle" f" maximal {self.config.num_train_timesteps} timesteps." ) self.num_inference_steps = num_inference_steps original_steps = ( original_inference_steps if original_inference_steps is not None else self.config.original_inference_steps ) if original_steps > self.config.num_train_timesteps: raise ValueError( f"`original_steps`: {original_steps} cannot be larger than `self.config.train_timesteps`:" f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle" f" maximal {self.config.num_train_timesteps} timesteps." ) if num_inference_steps > original_steps: raise ValueError( f"`num_inference_steps`: {num_inference_steps} cannot be larger than `original_inference_steps`:" f" {original_steps} because the final timestep schedule will be a subset of the" f" `original_inference_steps`-sized initial timestep schedule." ) # LCM Timesteps Setting # Currently, only linear spacing is supported. c = self.config.num_train_timesteps // original_steps # LCM Training Steps Schedule lcm_origin_timesteps = np.asarray(list(range(1, int(original_steps * strength) + 1))) * c - 1 skipping_step = len(lcm_origin_timesteps) // num_inference_steps # LCM Inference Steps Schedule timesteps = lcm_origin_timesteps[::-skipping_step][:num_inference_steps] self.timesteps = torch.from_numpy(timesteps.copy()).to(device=device, dtype=torch.long) self._step_index = None
Sets the discrete timesteps used for the diffusion chain (to be run before inference). Args: num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. original_inference_steps (`int`, *optional*): The original number of inference steps, which will be used to generate a linearly-spaced timestep schedule (which is different from the standard `diffusers` implementation). We will then take `num_inference_steps` timesteps from this schedule, evenly spaced in terms of indices, and use that as our final timestep schedule. If not set, this will default to the `original_inference_steps` attribute.
set_timesteps
python
instantX-research/InstantIR
schedulers/lcm_single_step_scheduler.py
https://github.com/instantX-research/InstantIR/blob/master/schedulers/lcm_single_step_scheduler.py
Apache-2.0
def append_dims(self, x, target_dims): """Appends dimensions to the end of a tensor until it has target_dims dimensions.""" dims_to_append = target_dims - x.ndim if dims_to_append < 0: raise ValueError(f"input has {x.ndim} dims but target_dims is {target_dims}, which is less") return x[(...,) + (None,) * dims_to_append]
Appends dimensions to the end of a tensor until it has target_dims dimensions.
append_dims
python
instantX-research/InstantIR
schedulers/lcm_single_step_scheduler.py
https://github.com/instantX-research/InstantIR/blob/master/schedulers/lcm_single_step_scheduler.py
Apache-2.0
def step( self, model_output: torch.FloatTensor, timestep: torch.Tensor, sample: torch.FloatTensor, generator: Optional[torch.Generator] = None, return_dict: bool = True, ) -> Union[LCMSingleStepSchedulerOutput, Tuple]: """ Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion process from the learned model outputs (most often the predicted noise). Args: model_output (`torch.FloatTensor`): The direct output from learned diffusion model. timestep (`float`): The current discrete timestep in the diffusion chain. sample (`torch.FloatTensor`): A current instance of a sample created by the diffusion process. generator (`torch.Generator`, *optional*): A random number generator. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] or `tuple`. Returns: [`~schedulers.scheduling_utils.LCMSchedulerOutput`] or `tuple`: If return_dict is `True`, [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] is returned, otherwise a tuple is returned where the first element is the sample tensor. """ # 0. make sure everything is on the same device alphas_cumprod = self.alphas_cumprod.to(sample.device) # 1. compute alphas, betas if timestep.ndim == 0: timestep = timestep.unsqueeze(0) alpha_prod_t = self.extract_into_tensor(alphas_cumprod, timestep, sample.shape) beta_prod_t = 1 - alpha_prod_t # 2. Get scalings for boundary conditions c_skip, c_out = self.get_scalings_for_boundary_condition_discrete(timestep) c_skip, c_out = [self.append_dims(x, sample.ndim) for x in [c_skip, c_out]] # 3. Compute the predicted original sample x_0 based on the model parameterization if self.config.prediction_type == "epsilon": # noise-prediction predicted_original_sample = (sample - torch.sqrt(beta_prod_t) * model_output) / torch.sqrt(alpha_prod_t) elif self.config.prediction_type == "sample": # x-prediction predicted_original_sample = model_output elif self.config.prediction_type == "v_prediction": # v-prediction predicted_original_sample = torch.sqrt(alpha_prod_t) * sample - torch.sqrt(beta_prod_t) * model_output else: raise ValueError( f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or" " `v_prediction` for `LCMScheduler`." ) # 4. Clip or threshold "predicted x_0" if self.config.thresholding: predicted_original_sample = self._threshold_sample(predicted_original_sample) elif self.config.clip_sample: predicted_original_sample = predicted_original_sample.clamp( -self.config.clip_sample_range, self.config.clip_sample_range ) # 5. Denoise model output using boundary conditions denoised = c_out * predicted_original_sample + c_skip * sample if not return_dict: return (denoised, ) return LCMSingleStepSchedulerOutput(denoised=denoised)
Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion process from the learned model outputs (most often the predicted noise). Args: model_output (`torch.FloatTensor`): The direct output from learned diffusion model. timestep (`float`): The current discrete timestep in the diffusion chain. sample (`torch.FloatTensor`): A current instance of a sample created by the diffusion process. generator (`torch.Generator`, *optional*): A random number generator. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] or `tuple`. Returns: [`~schedulers.scheduling_utils.LCMSchedulerOutput`] or `tuple`: If return_dict is `True`, [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] is returned, otherwise a tuple is returned where the first element is the sample tensor.
step
python
instantX-research/InstantIR
schedulers/lcm_single_step_scheduler.py
https://github.com/instantX-research/InstantIR/blob/master/schedulers/lcm_single_step_scheduler.py
Apache-2.0
def extract_feats(self, x: torch.Tensor): """ Extract features from the face of the image using facenet model. """ x = self.face_pool(x) x_feats = self.facenet(x) return x_feats
Extract features from the face of the image using facenet model.
extract_feats
python
instantX-research/InstantIR
losses/losses.py
https://github.com/instantX-research/InstantIR/blob/master/losses/losses.py
Apache-2.0
def forward(self, input): """Extents the input batch with augmentations If the input is consists of images [I1, I2] the extended augmented output will be [I1_resized, I2_resized, I1_aug1, I2_aug1, I1_aug2, I2_aug2 ...] Args: input ([type]): input batch of shape [batch, C, H, W] Returns: updated batch: of shape [batch * augmentations_number, C, H, W] """ # We want to multiply the number of images in the batch in contrast to regular augmantations # that do not change the number of samples in the batch) resized_images = self.avg_pool(input) resized_images = torch.tile(resized_images, dims=(self.augmentations_number, 1, 1, 1)) batch_size = input.shape[0] # We want at least one non augmented image non_augmented_batch = resized_images[:batch_size] augmented_batch = self.augmentations(resized_images[batch_size:]) updated_batch = torch.cat([non_augmented_batch, augmented_batch], dim=0) return updated_batch
Extents the input batch with augmentations If the input is consists of images [I1, I2] the extended augmented output will be [I1_resized, I2_resized, I1_aug1, I2_aug1, I1_aug2, I2_aug2 ...] Args: input ([type]): input batch of shape [batch, C, H, W] Returns: updated batch: of shape [batch * augmentations_number, C, H, W]
forward
python
instantX-research/InstantIR
losses/losses.py
https://github.com/instantX-research/InstantIR/blob/master/losses/losses.py
Apache-2.0
def update_center(self, teacher_output): """ Update center used for teacher output. """ batch_center = torch.sum(teacher_output, dim=0, keepdim=True) self.accelerator.reduce(batch_center, reduction="sum") batch_center = batch_center / (len(teacher_output) * self.accelerator.num_processes) # ema update self.center = self.center * self.center_momentum + batch_center * (1 - self.center_momentum)
Update center used for teacher output.
update_center
python
instantX-research/InstantIR
losses/losses.py
https://github.com/instantX-research/InstantIR/blob/master/losses/losses.py
Apache-2.0
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): """ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 """ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) # rescale the results from guidance (fixes overexposure) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
rescale_noise_cfg
python
instantX-research/InstantIR
pipelines/stage1_sdxl_pipeline.py
https://github.com/instantX-research/InstantIR/blob/master/pipelines/stage1_sdxl_pipeline.py
Apache-2.0
def retrieve_timesteps( scheduler, num_inference_steps: Optional[int] = None, device: Optional[Union[str, torch.device]] = None, timesteps: Optional[List[int]] = None, **kwargs, ): """ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps
Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps.
retrieve_timesteps
python
instantX-research/InstantIR
pipelines/stage1_sdxl_pipeline.py
https://github.com/instantX-research/InstantIR/blob/master/pipelines/stage1_sdxl_pipeline.py
Apache-2.0
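A minimal sketch of how the helper above is typically driven, assuming a diffusers scheduler is available; DDIMScheduler and the step counts are illustrative choices, and the custom-timesteps branch only works with schedulers whose set_timesteps accepts a timesteps argument.

from diffusers import DDIMScheduler  # illustrative; any SchedulerMixin works here

scheduler = DDIMScheduler()  # default 1000 training timesteps

# Let the scheduler lay out 30 inference steps with its default spacing.
timesteps, num_inference_steps = retrieve_timesteps(scheduler, num_inference_steps=30, device="cpu")
print(num_inference_steps, timesteps[:3])

# Custom spacing would instead be requested as:
# retrieve_timesteps(scheduler, timesteps=[999, 749, 499, 249, 0], device="cpu")
# which raises a ValueError for schedulers that do not support custom schedules.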
def get_guidance_scale_embedding( self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 ) -> torch.FloatTensor: """ See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 Args: w (`torch.Tensor`): Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. embedding_dim (`int`, *optional*, defaults to 512): Dimension of the embeddings to generate. dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): Data type of the generated embeddings. Returns: `torch.FloatTensor`: Embedding vectors with shape `(len(w), embedding_dim)`. """ assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: # zero pad emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb
See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 Args: w (`torch.Tensor`): Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. embedding_dim (`int`, *optional*, defaults to 512): Dimension of the embeddings to generate. dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): Data type of the generated embeddings. Returns: `torch.FloatTensor`: Embedding vectors with shape `(len(w), embedding_dim)`.
get_guidance_scale_embedding
python
instantX-research/InstantIR
pipelines/stage1_sdxl_pipeline.py
https://github.com/instantX-research/InstantIR/blob/master/pipelines/stage1_sdxl_pipeline.py
Apache-2.0
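The method above never touches self, so it can be exercised standalone; the sketch below is illustrative and uses the diffusers convention of embedding guidance_scale - 1, with embedding_dim=256 as a stand-in for the UNet's time_cond_proj_dim.

import torch

# Embed two guidance scales (7.5 and 3.0); a pipeline would pass the result to the
# UNet as `timestep_cond` when the model was trained with guidance embedding.
w = torch.tensor([7.5 - 1.0, 3.0 - 1.0])
cond = get_guidance_scale_embedding(None, w, embedding_dim=256)  # `self` is unused in the body
print(cond.shape)  # torch.Size([2, 256])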
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): """ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 """ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) # rescale the results from guidance (fixes overexposure) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
rescale_noise_cfg
python
instantX-research/InstantIR
pipelines/sdxl_instantir.py
https://github.com/instantX-research/InstantIR/blob/master/pipelines/sdxl_instantir.py
Apache-2.0
def retrieve_timesteps( scheduler, num_inference_steps: Optional[int] = None, device: Optional[Union[str, torch.device]] = None, timesteps: Optional[List[int]] = None, **kwargs, ): """ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps
Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps.
retrieve_timesteps
python
instantX-research/InstantIR
pipelines/sdxl_instantir.py
https://github.com/instantX-research/InstantIR/blob/master/pipelines/sdxl_instantir.py
Apache-2.0
def get_guidance_scale_embedding( self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 ) -> torch.FloatTensor: """ See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 Args: w (`torch.Tensor`): Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. embedding_dim (`int`, *optional*, defaults to 512): Dimension of the embeddings to generate. dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): Data type of the generated embeddings. Returns: `torch.FloatTensor`: Embedding vectors with shape `(len(w), embedding_dim)`. """ assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: # zero pad emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb
See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 Args: w (`torch.Tensor`): Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. embedding_dim (`int`, *optional*, defaults to 512): Dimension of the embeddings to generate. dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): Data type of the generated embeddings. Returns: `torch.FloatTensor`: Embedding vectors with shape `(len(w), embedding_dim)`.
get_guidance_scale_embedding
python
instantX-research/InstantIR
pipelines/sdxl_instantir.py
https://github.com/instantX-research/InstantIR/blob/master/pipelines/sdxl_instantir.py
Apache-2.0
def importance_sampling_fn(t, max_t, alpha): """Importance Sampling Function f(t)""" return 1 / max_t * (1 - alpha * np.cos(np.pi * t / max_t))
Importance Sampling Function f(t)
importance_sampling_fn
python
instantX-research/InstantIR
utils/train_utils.py
https://github.com/instantX-research/InstantIR/blob/master/utils/train_utils.py
Apache-2.0
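A small sketch of turning the density above into a discrete timestep sampler; max_t, alpha, and the batch size are illustrative values rather than the repository's training defaults.

import numpy as np

max_t, alpha = 1000, 0.5
t = np.arange(max_t)

# Evaluate f(t) on every timestep and normalise into a categorical distribution.
weights = importance_sampling_fn(t, max_t, alpha)
probs = weights / weights.sum()

# Draw a batch of timesteps biased towards the high-weight region.
rng = np.random.default_rng(0)
sampled_t = rng.choice(max_t, size=16, p=probs)
print(sampled_t)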
def tensor_to_pil(images): """ Convert image tensor or a batch of image tensors to PIL image(s). """ images = (images + 1) / 2 images_np = images.detach().cpu().numpy() if images_np.ndim == 4: images_np = np.transpose(images_np, (0, 2, 3, 1)) elif images_np.ndim == 3: images_np = np.transpose(images_np, (1, 2, 0)) images_np = images_np[None, ...] images_np = (images_np * 255).round().astype("uint8") if images_np.shape[-1] == 1: # special case for grayscale (single channel) images pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images_np] else: pil_images = [Image.fromarray(image[:, :, :3]) for image in images_np] return pil_images
Convert image tensor or a batch of image tensors to PIL image(s).
tensor_to_pil
python
instantX-research/InstantIR
utils/train_utils.py
https://github.com/instantX-research/InstantIR/blob/master/utils/train_utils.py
Apache-2.0
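Usage sketch (not from the repository): the random batch stands in for decoder output already scaled to [-1, 1], and the output filename is illustrative.

import torch

# Two fake RGB images in [-1, 1], the range this helper expects.
images = torch.rand(2, 3, 64, 64) * 2 - 1

pil_images = tensor_to_pil(images)
pil_images[0].save("sample_0.png")  # illustrative output path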
def detect_face(images: torch.Tensor, mtcnn: torch.nn.Module) -> torch.Tensor: """ Detect faces in the images using MTCNN. If no face is detected, use the whole image. """ images = rearrange(images, "b c h w -> b h w c") if images.dtype != torch.uint8: images = ((images * 0.5 + 0.5) * 255).type(torch.uint8) # Unnormalize _, _, landmarks = mtcnn(images, landmarks=True) return landmarks
Detect faces in the images using MTCNN. If no face is detected, use the whole image.
detect_face
python
instantX-research/InstantIR
utils/utils.py
https://github.com/instantX-research/InstantIR/blob/master/utils/utils.py
Apache-2.0
def extract_faces_and_landmarks(images: torch.Tensor, output_size=112, mtcnn: torch.nn.Module = None, refernce_points=REFERNCE_FACIAL_POINTS_RELATIVE): """ detect faces in the images and crop them (in a differentiable way) to 112x112 using MTCNN. """ images = Pad(200)(images) landmarks_batched = detect_face(images, mtcnn=mtcnn) affine_transformations = [] invalid_indices = [] for i, landmarks in enumerate(landmarks_batched): if landmarks is None: invalid_indices.append(i) affine_transformations.append(np.eye(2, 3).astype(np.float32)) else: affine_transformations.append(get_similarity_transform_for_cv2(landmarks[0].astype(np.float32), refernce_points.astype(np.float32) * output_size)) affine_transformations = torch.from_numpy(np.stack(affine_transformations).astype(np.float32)).to(device=images.device, dtype=torch.float32) invalid_indices = torch.tensor(invalid_indices).to(device=images.device) fp_images = images.to(torch.float32) return warp_affine(fp_images, affine_transformations, dsize=(output_size, output_size)).to(dtype=images.dtype), invalid_indices
Detect faces in the images and crop them (in a differentiable way) to 112x112 using MTCNN.
extract_faces_and_landmarks
python
instantX-research/InstantIR
utils/utils.py
https://github.com/instantX-research/InstantIR/blob/master/utils/utils.py
Apache-2.0
def tformfwd(trans, uv): """ Function: ---------- apply affine transform 'trans' to uv Parameters: ---------- @trans: 3x3 np.array transform matrix @uv: Kx2 np.array each row is a pair of coordinates (x, y) Returns: ---------- @xy: Kx2 np.array each row is a pair of transformed coordinates (x, y) """ uv = np.hstack(( uv, np.ones((uv.shape[0], 1)) )) xy = np.dot(uv, trans) xy = xy[:, 0:-1] return xy
Function: ---------- apply affine transform 'trans' to uv Parameters: ---------- @trans: 3x3 np.array transform matrix @uv: Kx2 np.array each row is a pair of coordinates (x, y) Returns: ---------- @xy: Kx2 np.array each row is a pair of transformed coordinates (x, y)
tformfwd
python
instantX-research/InstantIR
utils/matlab_cp2tform.py
https://github.com/instantX-research/InstantIR/blob/master/utils/matlab_cp2tform.py
Apache-2.0
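A quick check of the convention used by the helper above, assuming the MATLAB cp2tform layout where points are row vectors multiplied on the left and the translation sits in the last row; the angle and offsets are illustrative.

import numpy as np

theta = np.deg2rad(30)
trans = np.array([
    [np.cos(theta),  np.sin(theta), 0.0],
    [-np.sin(theta), np.cos(theta), 0.0],
    [10.0,           20.0,          1.0],  # translation row
])

uv = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
xy = tformfwd(trans, uv)  # rotate each point by 30 degrees, then shift by (10, 20)
print(xy)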
def tforminv(trans, uv): """ Function: ---------- apply the inverse of affine transform 'trans' to uv Parameters: ---------- @trans: 3x3 np.array transform matrix @uv: Kx2 np.array each row is a pair of coordinates (x, y) Returns: ---------- @xy: Kx2 np.array each row is a pair of inverse-transformed coordinates (x, y) """ Tinv = inv(trans) xy = tformfwd(Tinv, uv) return xy
Function: ---------- apply the inverse of affine transform 'trans' to uv Parameters: ---------- @trans: 3x3 np.array transform matrix @uv: Kx2 np.array each row is a pair of coordinates (x, y) Returns: ---------- @xy: Kx2 np.array each row is a pair of inverse-transformed coordinates (x, y)
tforminv
python
instantX-research/InstantIR
utils/matlab_cp2tform.py
https://github.com/instantX-research/InstantIR/blob/master/utils/matlab_cp2tform.py
Apache-2.0
def get_similarity_transform(src_pts, dst_pts, reflective=True): """ Function: ---------- Find Similarity Transform Matrix 'trans': u = src_pts[:, 0] v = src_pts[:, 1] x = dst_pts[:, 0] y = dst_pts[:, 1] [x, y, 1] = [u, v, 1] * trans Parameters: ---------- @src_pts: Kx2 np.array source points, each row is a pair of coordinates (x, y) @dst_pts: Kx2 np.array destination points, each row is a pair of transformed coordinates (x, y) @reflective: True or False if True: use reflective similarity transform else: use non-reflective similarity transform Returns: ---------- @trans: 3x3 np.array transform matrix from uv to xy trans_inv: 3x3 np.array inverse of trans, transform matrix from xy to uv """ if reflective: trans, trans_inv = findSimilarity(src_pts, dst_pts) else: trans, trans_inv = findNonreflectiveSimilarity(src_pts, dst_pts) return trans, trans_inv
Function: ---------- Find Similarity Transform Matrix 'trans': u = src_pts[:, 0] v = src_pts[:, 1] x = dst_pts[:, 0] y = dst_pts[:, 1] [x, y, 1] = [u, v, 1] * trans Parameters: ---------- @src_pts: Kx2 np.array source points, each row is a pair of coordinates (x, y) @dst_pts: Kx2 np.array destination points, each row is a pair of transformed coordinates (x, y) @reflective: True or False if True: use reflective similarity transform else: use non-reflective similarity transform Returns: ---------- @trans: 3x3 np.array transform matrix from uv to xy trans_inv: 3x3 np.array inverse of trans, transform matrix from xy to uv
get_similarity_transform
python
instantX-research/InstantIR
utils/matlab_cp2tform.py
https://github.com/instantX-research/InstantIR/blob/master/utils/matlab_cp2tform.py
Apache-2.0
def cvt_tform_mat_for_cv2(trans): """ Function: ---------- Convert Transform Matrix 'trans' into 'cv2_trans' which could be directly used by cv2.warpAffine(): u = src_pts[:, 0] v = src_pts[:, 1] x = dst_pts[:, 0] y = dst_pts[:, 1] [x, y].T = cv_trans * [u, v, 1].T Parameters: ---------- @trans: 3x3 np.array transform matrix from uv to xy Returns: ---------- @cv2_trans: 2x3 np.array transform matrix from src_pts to dst_pts, could be directly used for cv2.warpAffine() """ cv2_trans = trans[:, 0:2].T return cv2_trans
Function: ---------- Convert Transform Matrix 'trans' into 'cv2_trans' which could be directly used by cv2.warpAffine(): u = src_pts[:, 0] v = src_pts[:, 1] x = dst_pts[:, 0] y = dst_pts[:, 1] [x, y].T = cv_trans * [u, v, 1].T Parameters: ---------- @trans: 3x3 np.array transform matrix from uv to xy Returns: ---------- @cv2_trans: 2x3 np.array transform matrix from src_pts to dst_pts, could be directly used for cv2.warpAffine()
cvt_tform_mat_for_cv2
python
instantX-research/InstantIR
utils/matlab_cp2tform.py
https://github.com/instantX-research/InstantIR/blob/master/utils/matlab_cp2tform.py
Apache-2.0
def get_similarity_transform_for_cv2(src_pts, dst_pts, reflective=True): """ Function: ---------- Find Similarity Transform Matrix 'cv2_trans' which could be directly used by cv2.warpAffine(): u = src_pts[:, 0] v = src_pts[:, 1] x = dst_pts[:, 0] y = dst_pts[:, 1] [x, y].T = cv_trans * [u, v, 1].T Parameters: ---------- @src_pts: Kx2 np.array source points, each row is a pair of coordinates (x, y) @dst_pts: Kx2 np.array destination points, each row is a pair of transformed coordinates (x, y) reflective: True or False if True: use reflective similarity transform else: use non-reflective similarity transform Returns: ---------- @cv2_trans: 2x3 np.array transform matrix from src_pts to dst_pts, could be directly used for cv2.warpAffine() """ trans, trans_inv = get_similarity_transform(src_pts, dst_pts, reflective) cv2_trans = cvt_tform_mat_for_cv2(trans) return cv2_trans
Function: ---------- Find Similarity Transform Matrix 'cv2_trans' which could be directly used by cv2.warpAffine(): u = src_pts[:, 0] v = src_pts[:, 1] x = dst_pts[:, 0] y = dst_pts[:, 1] [x, y].T = cv_trans * [u, v, 1].T Parameters: ---------- @src_pts: Kx2 np.array source points, each row is a pair of coordinates (x, y) @dst_pts: Kx2 np.array destination points, each row is a pair of transformed coordinates (x, y) reflective: True or False if True: use reflective similarity transform else: use non-reflective similarity transform Returns: ---------- @cv2_trans: 2x3 np.array transform matrix from src_pts to dst_pts, could be directly used for cv2.warpAffine()
get_similarity_transform_for_cv2
python
instantX-research/InstantIR
utils/matlab_cp2tform.py
https://github.com/instantX-research/InstantIR/blob/master/utils/matlab_cp2tform.py
Apache-2.0
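A usage sketch pairing the helper above with cv2.warpAffine, as its docstring suggests; the landmark coordinates and the blank image are illustrative placeholders, not the repository's reference constants.

import cv2
import numpy as np

# Five detected facial landmarks and the canonical points they should land on in a 112x112 crop.
src_pts = np.array([[38.0, 52.0], [74.0, 51.0], [56.0, 72.0], [42.0, 92.0], [70.0, 91.0]], dtype=np.float32)
dst_pts = np.array([[38.3, 51.7], [73.5, 51.5], [56.0, 71.7], [41.5, 92.4], [70.7, 92.2]], dtype=np.float32)

cv2_trans = get_similarity_transform_for_cv2(src_pts, dst_pts)

image = np.zeros((160, 160, 3), dtype=np.uint8)  # stand-in for a detected face image
aligned = cv2.warpAffine(image, cv2_trans, (112, 112))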
def __call__(self, img_gt, kernels=None): ''' :param: img_gt: BCHW, RGB, [0, 1] float32 tensor ''' if kernels is None: kernel = [] kernel2 = [] sinc_kernel = [] for _ in range(img_gt.shape[0]): k, k2, sk = self.get_kernel() kernel.append(k) kernel2.append(k2) sinc_kernel.append(sk) kernel = torch.stack(kernel) kernel2 = torch.stack(kernel2) sinc_kernel = torch.stack(sinc_kernel) else: # kernels created in dataset. kernel, kernel2, sinc_kernel = kernels # ----------------------- Pre-process ----------------------- # im_gt = img_gt.to(self.device) if self.degrade_opt['use_usm']: im_gt = self.usm_sharpener(im_gt) im_gt = im_gt.to(memory_format=torch.contiguous_format).float() kernel = kernel.to(self.device) kernel2 = kernel2.to(self.device) sinc_kernel = sinc_kernel.to(self.device) ori_h, ori_w = im_gt.size()[2:4] # ----------------------- The first degradation process ----------------------- # # blur out = filter2D(im_gt, kernel) # random resize updown_type = random.choices( ['up', 'down', 'keep'], self.degrade_opt['resize_prob'], )[0] if updown_type == 'up': scale = random.uniform(1, self.degrade_opt['resize_range'][1]) elif updown_type == 'down': scale = random.uniform(self.degrade_opt['resize_range'][0], 1) else: scale = 1 mode = random.choice(['area', 'bilinear', 'bicubic']) out = torch.nn.functional.interpolate(out, scale_factor=scale, mode=mode) # add noise gray_noise_prob = self.degrade_opt['gray_noise_prob'] if random.random() < self.degrade_opt['gaussian_noise_prob']: out = random_add_gaussian_noise_pt( out, sigma_range=self.degrade_opt['noise_range'], clip=True, rounds=False, gray_prob=gray_noise_prob, ) else: out = random_add_poisson_noise_pt( out, scale_range=self.degrade_opt['poisson_scale_range'], gray_prob=gray_noise_prob, clip=True, rounds=False) # JPEG compression jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.degrade_opt['jpeg_range']) out = torch.clamp(out, 0, 1) # clamp to [0, 1], otherwise JPEGer will result in unpleasant artifacts out = self.jpeger(out, quality=jpeg_p) # ----------------------- The second degradation process ----------------------- # # blur if random.random() < self.degrade_opt['second_blur_prob']: out = out.contiguous() out = filter2D(out, kernel2) # random resize updown_type = random.choices( ['up', 'down', 'keep'], self.degrade_opt['resize_prob2'], )[0] if updown_type == 'up': scale = random.uniform(1, self.degrade_opt['resize_range2'][1]) elif updown_type == 'down': scale = random.uniform(self.degrade_opt['resize_range2'][0], 1) else: scale = 1 mode = random.choice(['area', 'bilinear', 'bicubic']) out = torch.nn.functional.interpolate( out, size=(int(ori_h / self.degrade_opt['sf'] * scale), int(ori_w / self.degrade_opt['sf'] * scale)), mode=mode, ) # add noise gray_noise_prob = self.degrade_opt['gray_noise_prob2'] if random.random() < self.degrade_opt['gaussian_noise_prob2']: out = random_add_gaussian_noise_pt( out, sigma_range=self.degrade_opt['noise_range2'], clip=True, rounds=False, gray_prob=gray_noise_prob, ) else: out = random_add_poisson_noise_pt( out, scale_range=self.degrade_opt['poisson_scale_range2'], gray_prob=gray_noise_prob, clip=True, rounds=False, ) # JPEG compression + the final sinc filter # We also need to resize images to desired sizes. We group [resize back + sinc filter] together # as one operation. # We consider two orders: # 1. [resize back + sinc filter] + JPEG compression # 2. JPEG compression + [resize back + sinc filter] # Empirically, we find other combinations (sinc + JPEG + Resize) will introduce twisted lines. 
if random.random() < 0.5: # resize back + the final sinc filter mode = random.choice(['area', 'bilinear', 'bicubic']) out = torch.nn.functional.interpolate( out, size=(ori_h // self.degrade_opt['sf'], ori_w // self.degrade_opt['sf']), mode=mode, ) out = out.contiguous() out = filter2D(out, sinc_kernel) # JPEG compression jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.degrade_opt['jpeg_range2']) out = torch.clamp(out, 0, 1) out = self.jpeger(out, quality=jpeg_p) else: # JPEG compression jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.degrade_opt['jpeg_range2']) out = torch.clamp(out, 0, 1) out = self.jpeger(out, quality=jpeg_p) # resize back + the final sinc filter mode = random.choice(['area', 'bilinear', 'bicubic']) out = torch.nn.functional.interpolate( out, size=(ori_h // self.degrade_opt['sf'], ori_w // self.degrade_opt['sf']), mode=mode, ) out = out.contiguous() out = filter2D(out, sinc_kernel) # clamp and round im_lq = torch.clamp(out, 0, 1.0) # random crop gt_size = self.degrade_opt['gt_size'] im_gt, im_lq = paired_random_crop(im_gt, im_lq, gt_size, self.degrade_opt['sf']) if self.degrade_opt['resize_lq']: im_lq = torch.nn.functional.interpolate( im_lq, size=(im_gt.size(-2), im_gt.size(-1)), mode='bicubic', ) if random.random() < self.degrade_opt['no_degradation_prob'] or torch.isnan(im_lq).any(): im_lq = im_gt # sharpen self.gt again, as we have changed the self.gt with self._dequeue_and_enqueue im_lq = im_lq.contiguous() # for the warning: grad and param do not obey the gradient layout contract im_lq = im_lq*2 - 1.0 im_gt = im_gt*2 - 1.0 if self.degrade_opt['random_size']: raise NotImplementedError im_lq, im_gt = self.randn_cropinput(im_lq, im_gt) im_lq = torch.clamp(im_lq, -1.0, 1.0) im_gt = torch.clamp(im_gt, -1.0, 1.0) return (im_lq, im_gt)
:param img_gt: BCHW, RGB, [0, 1] float32 tensor
__call__
python
instantX-research/InstantIR
utils/degradation_pipeline.py
https://github.com/instantX-research/InstantIR/blob/master/utils/degradation_pipeline.py
Apache-2.0
def test_finetune(): """Ensure that the initially loaded config is the one from the pretrained model.""" trainer = RAGTrainer( model_name="test", pretrained_model_name="colbert-ir/colbertv2.0", language_code="en", ) trainer_config = trainer.model.config assert ColBERTConfig() != trainer_config assert trainer_config.query_token == "[Q]" assert trainer_config.doc_token == "[D]" assert trainer_config.nbits == 1 assert trainer_config.kmeans_niters == 20 assert trainer_config.lr == 1e-05 assert trainer_config.relu is False assert trainer_config.nway == 64 assert trainer_config.doc_maxlen == 180 assert trainer_config.use_ib_negatives is True assert trainer_config.name == "kldR2.nway64.ib"
Ensure that the initially loaded config is the one from the pretrained model.
test_finetune
python
AnswerDotAI/RAGatouille
tests/test_trainer_loading.py
https://github.com/AnswerDotAI/RAGatouille/blob/master/tests/test_trainer_loading.py
Apache-2.0
def time_limit(seconds): """Time limit context manager as in https://stackoverflow.com/a/601168""" def signal_handler(_, __): raise TimeoutException("Timed out!") signal.signal(signal.SIGALRM, signal_handler) signal.alarm(seconds) try: yield finally: signal.alarm(0)
Time limit context manager as in https://stackoverflow.com/a/601168
time_limit
python
AnswerDotAI/RAGatouille
tests/test_training.py
https://github.com/AnswerDotAI/RAGatouille/blob/master/tests/test_training.py
Apache-2.0
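A small usage sketch, assuming time_limit is decorated with contextlib.contextmanager in the source module (the generator body implies it), that TimeoutException is defined alongside it, and that the code runs on a Unix system where SIGALRM is available.

import time

try:
    with time_limit(2):   # alarm fires after 2 seconds
        time.sleep(5)     # deliberately exceeds the budget
except TimeoutException:  # assumed to live next to time_limit in the test module
    print("work was aborted by the alarm")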
def test_training(tmp_path): """This test is based on the content of examples/02-basic_training.ipynb and mainly tests that there are no exceptions which can happen e.g. due to bugs in data processing. """ trainer = RAGTrainer( model_name="GhibliColBERT", pretrained_model_name="colbert-ir/colbertv2.0", language_code="en", ) pages = ["miyazaki", "Studio_Ghibli", "Toei_Animation"] my_full_corpus = [(DATA_DIR / f"{p}_wikipedia.txt").open().read() for p in pages] corpus_processor = CorpusProcessor( document_splitter_fn=llama_index_sentence_splitter ) documents = corpus_processor.process_corpus(my_full_corpus, chunk_size=256) queries = [ "What manga did Hayao Miyazaki write?", "which film made ghibli famous internationally", "who directed Spirited Away?", "when was Hikotei Jidai published?", "where's studio ghibli based?", "where is the ghibli museum?", ] pairs = [] for query in queries: fake_relevant_docs = random.sample(documents, 10) for doc in fake_relevant_docs: pairs.append((query, doc)) trainer.prepare_training_data( raw_data=pairs, data_out_path=str(tmp_path), all_documents=my_full_corpus, num_new_negatives=10, mine_hard_negatives=True, ) try: with time_limit(30): trainer.train( batch_size=32, nbits=4, # How many bits will the trained model use when compressing indexes maxsteps=1, # Maximum steps hard stop use_ib_negatives=True, # Use in-batch negative to calculate loss dim=128, # How many dimensions per embedding. 128 is the default and works well. learning_rate=5e-6, # Learning rate, small values ([3e-6,3e-5] work best if the base model is BERT-like, 5e-6 is often the sweet spot) doc_maxlen=256, # Maximum document length. Because of how ColBERT works, smaller chunks (128-256) work very well. use_relu=False, # Disable ReLU -- doesn't improve performance warmup_steps="auto", # Defaults to 10% ) # Simply test that some of the files generated have really been made. assert (tmp_path / "corpus.train.colbert.tsv").exists() except TimeoutException as e: print("Timed out!") raise AssertionError("Timout in training") from None
This test is based on the content of examples/02-basic_training.ipynb and mainly tests that there are no exceptions which can happen e.g. due to bugs in data processing.
test_training
python
AnswerDotAI/RAGatouille
tests/test_training.py
https://github.com/AnswerDotAI/RAGatouille/blob/master/tests/test_training.py
Apache-2.0
def get_wikipedia_page(title: str): """ Retrieve the full text content of a Wikipedia page. :param title: str - Title of the Wikipedia page. :return: str - Full text content of the page as raw string. """ # Wikipedia API endpoint URL = "https://en.wikipedia.org/w/api.php" # Parameters for the API request params = { "action": "query", "format": "json", "titles": title, "prop": "extracts", "explaintext": True, } # Custom User-Agent header to comply with Wikipedia's best practices headers = {"User-Agent": "RAGatouille_tutorial/0.0.1 ([email protected])"} response = requests.get(URL, params=params, headers=headers) data = response.json() # Extracting page content page = next(iter(data["query"]["pages"].values())) return page["extract"] if "extract" in page else None
Retrieve the full text content of a Wikipedia page. :param title: str - Title of the Wikipedia page. :return: str - Full text content of the page as raw string.
get_wikipedia_page
python
AnswerDotAI/RAGatouille
ragatouille/utils.py
https://github.com/AnswerDotAI/RAGatouille/blob/master/ragatouille/utils.py
Apache-2.0
def __init__( self, model_name: str, pretrained_model_name: str, language_code: str = "en", n_usable_gpus: int = -1, ): """ Initialise a RAGTrainer instance. This will load a base model: either an existing ColBERT model to fine-tune or a BERT/RoBERTa-like model to build a new ColBERT model from. Parameters: model_name: str - Name of the model to train. This will be used to name the checkpoints and the index. pretrained_model_name: str - Name of the pretrained model to use as a base. Can be a local path to a checkpoint or a huggingface model name. language_code: str - Language code of the model to train. This will be used to name the checkpoints and the index. n_usable_gpus: int - Number of GPUs to use. By default, value is -1, which means use all available GPUs or none if no GPU is available. Returns: self (RAGTrainer): The current instance of RAGTrainer, with the base model initialised. """ self.model_name = model_name self.pretrained_model_name = pretrained_model_name self.language_code = language_code self.model: Union[LateInteractionModel, None] = ColBERT( pretrained_model_name_or_path=pretrained_model_name, n_gpu=n_usable_gpus, training_mode=True, ) self.negative_miner: Union[HardNegativeMiner, None] = None self.collection: list[str] = [] self.queries: Union[list[str], None] = None self.raw_data: Union[list[tuple], list[list], None] = None self.training_triplets: list[list[int]] = list()
Initialise a RAGTrainer instance. This will load a base model: either an existing ColBERT model to fine-tune or a BERT/RoBERTa-like model to build a new ColBERT model from. Parameters: model_name: str - Name of the model to train. This will be used to name the checkpoints and the index. pretrained_model_name: str - Name of the pretrained model to use as a base. Can be a local path to a checkpoint or a huggingface model name. language_code: str - Language code of the model to train. This will be used to name the checkpoints and the index. n_usable_gpus: int - Number of GPUs to use. By default, value is -1, which means use all available GPUs or none if no GPU is available. Returns: self (RAGTrainer): The current instance of RAGTrainer, with the base model initialised.
__init__
python
AnswerDotAI/RAGatouille
ragatouille/RAGTrainer.py
https://github.com/AnswerDotAI/RAGatouille/blob/master/ragatouille/RAGTrainer.py
Apache-2.0
def export_training_data(self, path: Union[str, Path]): """ Manually export the training data processed by prepare_training_data to a given path. Parameters: path: Union[str, Path] - Path to the directory where the data will be exported.""" self.data_processor.export_training_data(path)
Manually export the training data processed by prepare_training_data to a given path. Parameters: path: Union[str, Path] - Path to the directory where the data will be exported.
export_training_data
python
AnswerDotAI/RAGatouille
ragatouille/RAGTrainer.py
https://github.com/AnswerDotAI/RAGatouille/blob/master/ragatouille/RAGTrainer.py
Apache-2.0
def prepare_training_data( self, raw_data: Union[list[tuple], list[list]], all_documents: Optional[list[str]] = None, data_out_path: Union[str, Path] = "./data/", num_new_negatives: int = 10, hard_negative_minimum_rank: int = 10, mine_hard_negatives: bool = True, hard_negative_model_size: str = "small", pairs_with_labels: bool = False, positive_label: Union[int, str] = 1, negative_label: Union[int, str] = 0, ) -> str: """ Fully pre-process input-data in various raw formats into ColBERT-ready files and triplets. Will accept a variety of formats, such as unannotated pairs, annotated pairs, triplets of strings and triplets of list of strings. Will process into a ColBERT-ready format and export to data_out_path. Will generate hard negatives if mine_hard_negatives is True. num_new_negatives decides how many negatives will be generated. if mine_hard_negatives is False and num_new_negatives is > 0, these negatives will be randomly sampled. Parameters: raw_data: Union[list[tuple], list[list]] - List of pairs, annotated pairs, or triplets of strings. all_documents: Optional[list[str]] - A corpus of documents to be used for sampling negatives. data_out_path: Union[str, Path] - Path to the directory where the data will be exported (can be a tmp directory). num_new_negatives: int - Number of new negatives to generate for each query. mine_hard_negatives: bool - Whether to use hard negatives mining or not. hard_negative_model_size: str - Size of the model to use for hard negatives mining. pairs_with_labels: bool - Whether the raw_data is a list of pairs with labels or not. positive_label: Union[int, str] - Label to use for positive pairs. negative_label: Union[int, str] - Label to use for negative pairs. Returns: data_out_path: Union[str, Path] - Path to the directory where the data has been exported. """ if all_documents is not None: self.collection += [doc for doc in all_documents if isinstance(doc, str)] self.data_dir = Path(data_out_path) sample = raw_data[0] if len(sample) == 2: data_type = "pairs" elif len(sample) == 3: if pairs_with_labels: data_type = "labeled_pairs" if sample[-1] not in [positive_label, negative_label]: raise ValueError(f"Invalid value for label: {sample}") else: data_type = "triplets" else: raise ValueError("Raw data must be a list of pairs or triplets of strings.") self.queries = set() for x in raw_data: if isinstance(x[0], str): self.queries.add(x[0]) else: raise ValueError("Queries must be a strings.") self._add_to_collection(x[1]) if data_type == "triplets": self._add_to_collection(x[2]) self.collection = list(set(self.collection)) seeded_shuffle(self.collection) if mine_hard_negatives: self.negative_miner = SimpleMiner( language_code=self.language_code, model_size=hard_negative_model_size, ) self.negative_miner.build_index(self.collection) self.data_processor = TrainingDataProcessor( collection=self.collection, queries=self.queries, negative_miner=self.negative_miner if mine_hard_negatives else None, ) self.data_processor.process_raw_data( data_type=data_type, raw_data=raw_data, export=True, data_dir=data_out_path, num_new_negatives=num_new_negatives, positive_label=positive_label, negative_label=negative_label, mine_hard_negatives=mine_hard_negatives, hard_negative_minimum_rank=hard_negative_minimum_rank, ) if len(self.data_processor.training_triplets) == 0: if mine_hard_negatives: print( "Warning: No training triplets were generated with setting mine_hard_negatives=='True'. 
This may be due to the data being too small or the hard negative miner not being able to find enough hard negatives." ) self.data_processor.process_raw_data( data_type=data_type, raw_data=raw_data, export=True, data_dir=data_out_path, num_new_negatives=num_new_negatives, positive_label=positive_label, negative_label=negative_label, mine_hard_negatives=False, hard_negative_minimum_rank=hard_negative_minimum_rank, ) else: raise ValueError("No training triplets were generated.") self.training_triplets = self.data_processor.training_triplets return data_out_path
Fully pre-process input data in various raw formats into ColBERT-ready files and triplets. Will accept a variety of formats, such as unannotated pairs, annotated pairs, triplets of strings and triplets of lists of strings. Will process into a ColBERT-ready format and export to data_out_path. Will generate hard negatives if mine_hard_negatives is True. num_new_negatives decides how many negatives will be generated. If mine_hard_negatives is False and num_new_negatives > 0, these negatives will be randomly sampled. Parameters: raw_data: Union[list[tuple], list[list]] - List of pairs, annotated pairs, or triplets of strings. all_documents: Optional[list[str]] - A corpus of documents to be used for sampling negatives. data_out_path: Union[str, Path] - Path to the directory where the data will be exported (can be a tmp directory). num_new_negatives: int - Number of new negatives to generate for each query. mine_hard_negatives: bool - Whether to use hard negatives mining or not. hard_negative_model_size: str - Size of the model to use for hard negatives mining. pairs_with_labels: bool - Whether the raw_data is a list of pairs with labels or not. positive_label: Union[int, str] - Label to use for positive pairs. negative_label: Union[int, str] - Label to use for negative pairs. Returns: data_out_path: Union[str, Path] - Path to the directory where the data has been exported.
prepare_training_data
python
AnswerDotAI/RAGatouille
ragatouille/RAGTrainer.py
https://github.com/AnswerDotAI/RAGatouille/blob/master/ragatouille/RAGTrainer.py
Apache-2.0
def train( self, batch_size: int = 32, nbits: int = 2, maxsteps: int = 500_000, use_ib_negatives: bool = True, learning_rate: float = 5e-6, dim: int = 128, doc_maxlen: int = 256, use_relu: bool = False, warmup_steps: Union[int, Literal["auto"]] = "auto", accumsteps: int = 1, ) -> str: """ Launch training or fine-tuning of a ColBERT model. Parameters: batch_size: int - Total batch size -- divice by n_usable_gpus for per-GPU batch size. nbits: int - number of bits used for vector compression by the traiened model. 2 is usually ideal. maxsteps: int - End training early after maxsteps steps. use_ib_negatives: bool - Whether to use in-batch negatives to calculate loss or not. learning_rate: float - ColBERT litterature usually has this performing best between 3e-6 - 2e-5 depending on data size dim: int - Size of individual vector representations. doc_maxlen: int - The maximum length after which passages will be truncated warmup_steps: Union[int, Literal["auto"]] - How many warmup steps to use for the learning rate. Auto will default to 10% of total steps accumsteps: How many gradient accummulation steps to use to simulate higher batch sizes. Returns: model_path: str - Path to the trained model. """ if not self.training_triplets: total_triplets = sum( 1 for _ in open(str(self.data_dir / "triples.train.colbert.jsonl"), "r") ) else: total_triplets = len(self.training_triplets) training_config = ColBERTConfig( bsize=batch_size, model_name=self.model_name, name=self.model_name, checkpoint=self.pretrained_model_name, use_ib_negatives=use_ib_negatives, maxsteps=maxsteps, nbits=nbits, lr=learning_rate, dim=dim, doc_maxlen=doc_maxlen, relu=use_relu, accumsteps=accumsteps, warmup=int(total_triplets // batch_size * 0.1) if warmup_steps == "auto" else warmup_steps, save_every=int(total_triplets // batch_size // 10), ) return self.model.train(data_dir=self.data_dir, training_config=training_config)
Launch training or fine-tuning of a ColBERT model. Parameters: batch_size: int - Total batch size -- divide by n_usable_gpus for per-GPU batch size. nbits: int - Number of bits used for vector compression by the trained model. 2 is usually ideal. maxsteps: int - End training early after maxsteps steps. use_ib_negatives: bool - Whether to use in-batch negatives to calculate loss or not. learning_rate: float - ColBERT literature usually has this performing best between 3e-6 and 2e-5 depending on data size. dim: int - Size of individual vector representations. doc_maxlen: int - The maximum length after which passages will be truncated. warmup_steps: Union[int, Literal["auto"]] - How many warmup steps to use for the learning rate. Auto will default to 10% of total steps. accumsteps: How many gradient accumulation steps to use to simulate higher batch sizes. Returns: model_path: str - Path to the trained model.
train
python
AnswerDotAI/RAGatouille
ragatouille/RAGTrainer.py
https://github.com/AnswerDotAI/RAGatouille/blob/master/ragatouille/RAGTrainer.py
Apache-2.0
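An end-to-end sketch of the two training methods above (prepare_training_data followed by train), following the library's documented flow; the query/passage strings are toy placeholders and the hyperparameters are illustrative rather than recommended values.

from ragatouille import RAGTrainer

trainer = RAGTrainer(
    model_name="MyColBERT",
    pretrained_model_name="colbert-ir/colbertv2.0",
    language_code="en",
)

# Unannotated (query, relevant_passage) pairs; extra negatives are mined from the corpus.
pairs = [
    ("who founded studio ghibli?", "Studio Ghibli was founded in 1985 by Hayao Miyazaki and Isao Takahata."),
    ("where is the ghibli museum?", "The Ghibli Museum is located in Mitaka, Tokyo."),
]
corpus = [passage for _, passage in pairs]

data_path = trainer.prepare_training_data(
    raw_data=pairs,
    all_documents=corpus,
    data_out_path="./train_data/",
    num_new_negatives=10,
    mine_hard_negatives=True,
)

model_path = trainer.train(
    batch_size=32,
    nbits=2,
    maxsteps=10_000,
    learning_rate=5e-6,
    doc_maxlen=256,
)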
def from_pretrained( cls, pretrained_model_name_or_path: Union[str, Path], n_gpu: int = -1, verbose: int = 1, index_root: Optional[str] = None, ): """Load a ColBERT model from a pre-trained checkpoint. Parameters: pretrained_model_name_or_path (str): Local path or huggingface model name. n_gpu (int): Number of GPUs to use. By default, value is -1, which means use all available GPUs or none if no GPU is available. verbose (int): The level of ColBERT verbosity requested. By default, 1, which will filter out most internal logs. index_root (Optional[str]): The root directory where indexes will be stored. If None, will use the default directory, '.ragatouille/'. Returns: cls (RAGPretrainedModel): The current instance of RAGPretrainedModel, with the model initialised. """ instance = cls() instance.model = ColBERT( pretrained_model_name_or_path, n_gpu, index_root=index_root, verbose=verbose ) return instance
Load a ColBERT model from a pre-trained checkpoint. Parameters: pretrained_model_name_or_path (str): Local path or huggingface model name. n_gpu (int): Number of GPUs to use. By default, value is -1, which means use all available GPUs or none if no GPU is available. verbose (int): The level of ColBERT verbosity requested. By default, 1, which will filter out most internal logs. index_root (Optional[str]): The root directory where indexes will be stored. If None, will use the default directory, '.ragatouille/'. Returns: cls (RAGPretrainedModel): The current instance of RAGPretrainedModel, with the model initialised.
from_pretrained
python
AnswerDotAI/RAGatouille
ragatouille/RAGPretrainedModel.py
https://github.com/AnswerDotAI/RAGatouille/blob/master/ragatouille/RAGPretrainedModel.py
Apache-2.0
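A minimal index-and-search sketch using the loader above together with the index and search methods defined later in this file; the two documents are toy placeholders.

from ragatouille import RAGPretrainedModel

RAG = RAGPretrainedModel.from_pretrained("colbert-ir/colbertv2.0")

docs = [
    "Hayao Miyazaki co-founded Studio Ghibli in 1985.",
    "Studio Ghibli is headquartered in Koganei, Tokyo.",
]
index_path = RAG.index(collection=docs, index_name="ghibli_demo", max_document_length=256)

results = RAG.search("where is studio ghibli based?", k=2)
print(results[0]["content"], results[0]["score"], results[0]["rank"])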
def from_index( cls, index_path: Union[str, Path], n_gpu: int = -1, verbose: int = 1 ): """Load an Index and the associated ColBERT encoder from an existing document index. Parameters: index_path (Union[str, path]): Path to the index. n_gpu (int): Number of GPUs to use. By default, value is -1, which means use all available GPUs or none if no GPU is available. verbose (int): The level of ColBERT verbosity requested. By default, 1, which will filter out most internal logs. Returns: cls (RAGPretrainedModel): The current instance of RAGPretrainedModel, with the model and index initialised. """ instance = cls() index_path = Path(index_path) instance.model = ColBERT( index_path, n_gpu, verbose=verbose, load_from_index=True ) return instance
Load an Index and the associated ColBERT encoder from an existing document index. Parameters: index_path (Union[str, path]): Path to the index. n_gpu (int): Number of GPUs to use. By default, value is -1, which means use all available GPUs or none if no GPU is available. verbose (int): The level of ColBERT verbosity requested. By default, 1, which will filter out most internal logs. Returns: cls (RAGPretrainedModel): The current instance of RAGPretrainedModel, with the model and index initialised.
from_index
python
AnswerDotAI/RAGatouille
ragatouille/RAGPretrainedModel.py
https://github.com/AnswerDotAI/RAGatouille/blob/master/ragatouille/RAGPretrainedModel.py
Apache-2.0
def _process_corpus( self, collection: List[str], document_ids: List[str], document_metadatas: List[Dict[Any, Any]], document_splitter_fn: Optional[Callable[[str], List[str]]], preprocessing_fn: Optional[Callable[[str], str]], max_document_length: int, ) -> Tuple[List[str], Dict[int, str], Dict[str, Dict[Any, Any]]]: """ Processes a collection of documents by assigning unique IDs, splitting documents if necessary, applying preprocessing, and organizing metadata. """ document_ids, docid_metadata_map = self._process_metadata( document_ids=document_ids, document_metadatas=document_metadatas, collection_len=len(collection), ) if document_splitter_fn is not None or preprocessing_fn is not None: self.corpus_processor = CorpusProcessor( document_splitter_fn=document_splitter_fn, preprocessing_fn=preprocessing_fn, ) collection_with_ids = self.corpus_processor.process_corpus( collection, document_ids, chunk_size=max_document_length, ) else: collection_with_ids = [ {"document_id": x, "content": y} for x, y in zip(document_ids, collection) ] pid_docid_map = { index: item["document_id"] for index, item in enumerate(collection_with_ids) } collection = [x["content"] for x in collection_with_ids] return collection, pid_docid_map, docid_metadata_map
Processes a collection of documents by assigning unique IDs, splitting documents if necessary, applying preprocessing, and organizing metadata.
_process_corpus
python
AnswerDotAI/RAGatouille
ragatouille/RAGPretrainedModel.py
https://github.com/AnswerDotAI/RAGatouille/blob/master/ragatouille/RAGPretrainedModel.py
Apache-2.0
def index( self, collection: list[str], document_ids: Union[TypeVar("T"), List[TypeVar("T")]] = None, document_metadatas: Optional[list[dict]] = None, index_name: str = None, overwrite_index: Union[bool, str] = True, max_document_length: int = 256, split_documents: bool = True, document_splitter_fn: Optional[Callable] = llama_index_sentence_splitter, preprocessing_fn: Optional[Union[Callable, list[Callable]]] = None, bsize: int = 32, use_faiss: bool = False, ): """Build an index from a list of documents. Parameters: collection (list[str]): The collection of documents to index. document_ids (Optional[list[str]]): An optional list of document ids. Ids will be generated at index time if not supplied. index_name (str): The name of the index that will be built. overwrite_index (Union[bool, str]): Whether to overwrite an existing index with the same name. max_document_length (int): The maximum length of a document. Documents longer than this will be split into chunks. split_documents (bool): Whether to split documents into chunks. document_splitter_fn (Optional[Callable]): A function to split documents into chunks. If None and by default, will use the llama_index_sentence_splitter. preprocessing_fn (Optional[Union[Callable, list[Callable]]]): A function or list of functions to preprocess documents. If None and by default, will not preprocess documents. bsize (int): The batch size to use for encoding the passages. Returns: index (str): The path to the index that was built. """ if not split_documents: document_splitter_fn = None collection, pid_docid_map, docid_metadata_map = self._process_corpus( collection, document_ids, document_metadatas, document_splitter_fn, preprocessing_fn, max_document_length, ) return self.model.index( collection, pid_docid_map=pid_docid_map, docid_metadata_map=docid_metadata_map, index_name=index_name, max_document_length=max_document_length, overwrite=overwrite_index, bsize=bsize, use_faiss=use_faiss, )
Build an index from a list of documents. Parameters: collection (list[str]): The collection of documents to index. document_ids (Optional[list[str]]): An optional list of document ids. Ids will be generated at index time if not supplied. index_name (str): The name of the index that will be built. overwrite_index (Union[bool, str]): Whether to overwrite an existing index with the same name. max_document_length (int): The maximum length of a document. Documents longer than this will be split into chunks. split_documents (bool): Whether to split documents into chunks. document_splitter_fn (Optional[Callable]): A function to split documents into chunks. If None and by default, will use the llama_index_sentence_splitter. preprocessing_fn (Optional[Union[Callable, list[Callable]]]): A function or list of functions to preprocess documents. If None and by default, will not preprocess documents. bsize (int): The batch size to use for encoding the passages. Returns: index (str): The path to the index that was built.
index
python
AnswerDotAI/RAGatouille
ragatouille/RAGPretrainedModel.py
https://github.com/AnswerDotAI/RAGatouille/blob/master/ragatouille/RAGPretrainedModel.py
Apache-2.0
def add_to_index( self, new_collection: list[str], new_document_ids: Optional[Union[TypeVar("T"), List[TypeVar("T")]]] = None, new_document_metadatas: Optional[list[dict]] = None, index_name: Optional[str] = None, split_documents: bool = True, document_splitter_fn: Optional[Callable] = llama_index_sentence_splitter, preprocessing_fn: Optional[Union[Callable, list[Callable]]] = None, bsize: int = 32, use_faiss: bool = False, ): """Add documents to an existing index. Parameters: new_collection (list[str]): The documents to add to the index. new_document_metadatas (Optional[list[dict]]): An optional list of metadata dicts index_name (Optional[str]): The name of the index to add documents to. If None and by default, will add documents to the already initialised one. bsize (int): The batch size to use for encoding the passages. """ if not split_documents: document_splitter_fn = None ( new_collection, new_pid_docid_map, new_docid_metadata_map, ) = self._process_corpus( new_collection, new_document_ids, new_document_metadatas, document_splitter_fn, preprocessing_fn, self.model.config.doc_maxlen, ) self.model.add_to_index( new_collection, new_pid_docid_map, new_docid_metadata_map=new_docid_metadata_map, index_name=index_name, bsize=bsize, use_faiss=use_faiss, )
Add documents to an existing index. Parameters: new_collection (list[str]): The documents to add to the index. new_document_metadatas (Optional[list[dict]]): An optional list of metadata dicts index_name (Optional[str]): The name of the index to add documents to. If None and by default, will add documents to the already initialised one. bsize (int): The batch size to use for encoding the passages.
add_to_index
python
AnswerDotAI/RAGatouille
ragatouille/RAGPretrainedModel.py
https://github.com/AnswerDotAI/RAGatouille/blob/master/ragatouille/RAGPretrainedModel.py
Apache-2.0
def delete_from_index( self, document_ids: Union[TypeVar("T"), List[TypeVar("T")]], index_name: Optional[str] = None, ): """Delete documents from an index by their IDs. Parameters: document_ids (Union[TypeVar("T"), List[TypeVar("T")]]): The IDs of the documents to delete. index_name (Optional[str]): The name of the index to delete documents from. If None and by default, will delete documents from the already initialised one. """ self.model.delete_from_index( document_ids, index_name=index_name, )
Delete documents from an index by their IDs. Parameters: document_ids (Union[TypeVar("T"), List[TypeVar("T")]]): The IDs of the documents to delete. index_name (Optional[str]): The name of the index to delete documents from. If None and by default, will delete documents from the already initialised one.
delete_from_index
python
AnswerDotAI/RAGatouille
ragatouille/RAGPretrainedModel.py
https://github.com/AnswerDotAI/RAGatouille/blob/master/ragatouille/RAGPretrainedModel.py
Apache-2.0
def search( self, query: Union[str, list[str]], index_name: Optional["str"] = None, k: int = 10, force_fast: bool = False, zero_index_ranks: bool = False, doc_ids: Optional[list[str]] = None, **kwargs, ): """Query an index. Parameters: query (Union[str, list[str]]): The query or list of queries to search for. index_name (Optional[str]): Provide the name of an index to query. If None and by default, will query an already initialised one. k (int): The number of results to return for each query. force_fast (bool): Whether to force the use of a faster but less accurate search method. zero_index_ranks (bool): Whether to zero the index ranks of the results. By default, result rank 1 is the highest ranked result Returns: results (Union[list[dict], list[list[dict]]]): A list of dict containing individual results for each query. If a list of queries is provided, returns a list of lists of dicts. Each result is a dict with keys `content`, `score`, `rank`, and 'document_id'. If metadata was indexed for the document, it will be returned under the "document_metadata" key. Individual results are always in the format: ```python3 {"content": "text of the relevant passage", "score": 0.123456, "rank": 1, "document_id": "x"} ``` or ```python3 {"content": "text of the relevant passage", "score": 0.123456, "rank": 1, "document_id": "x", "document_metadata": {"metadata_key": "metadata_value", ...}} ``` """ return self.model.search( query=query, index_name=index_name, k=k, force_fast=force_fast, zero_index_ranks=zero_index_ranks, doc_ids=doc_ids, **kwargs, )
Query an index. Parameters: query (Union[str, list[str]]): The query or list of queries to search for. index_name (Optional[str]): Provide the name of an index to query. If None and by default, will query an already initialised one. k (int): The number of results to return for each query. force_fast (bool): Whether to force the use of a faster but less accurate search method. zero_index_ranks (bool): Whether to zero the index ranks of the results. By default, result rank 1 is the highest ranked result Returns: results (Union[list[dict], list[list[dict]]]): A list of dict containing individual results for each query. If a list of queries is provided, returns a list of lists of dicts. Each result is a dict with keys `content`, `score`, `rank`, and 'document_id'. If metadata was indexed for the document, it will be returned under the "document_metadata" key. Individual results are always in the format: ```python3 {"content": "text of the relevant passage", "score": 0.123456, "rank": 1, "document_id": "x"} ``` or ```python3 {"content": "text of the relevant passage", "score": 0.123456, "rank": 1, "document_id": "x", "document_metadata": {"metadata_key": "metadata_value", ...}} ```
search
python
AnswerDotAI/RAGatouille
ragatouille/RAGPretrainedModel.py
https://github.com/AnswerDotAI/RAGatouille/blob/master/ragatouille/RAGPretrainedModel.py
Apache-2.0
def rerank( self, query: Union[str, list[str]], documents: list[str], k: int = 10, zero_index_ranks: bool = False, bsize: Union[Literal["auto"], int] = "auto", ): """Encode documents and rerank them in-memory. Performance degrades rapidly with more documents. Parameters: query (Union[str, list[str]]): The query or list of queries to search for. documents (list[str]): The documents to rerank. k (int): The number of results to return for each query. zero_index_ranks (bool): Whether to zero the index ranks of the results. By default, result rank 1 is the highest ranked result bsize (int): The batch size to use for re-ranking. Returns: results (Union[list[dict], list[list[dict]]]): A list of dict containing individual results for each query. If a list of queries is provided, returns a list of lists of dicts. Each result is a dict with keys `content`, `score` and `rank`. Individual results are always in the format: ```python3 {"content": "text of the relevant passage", "score": 0.123456, "rank": 1} ``` """ return self.model.rank( query=query, documents=documents, k=k, zero_index_ranks=zero_index_ranks, bsize=bsize, )
Encode documents and rerank them in-memory. Performance degrades rapidly with more documents. Parameters: query (Union[str, list[str]]): The query or list of queries to search for. documents (list[str]): The documents to rerank. k (int): The number of results to return for each query. zero_index_ranks (bool): Whether to zero the index ranks of the results. By default, result rank 1 is the highest ranked result bsize (int): The batch size to use for re-ranking. Returns: results (Union[list[dict], list[list[dict]]]): A list of dict containing individual results for each query. If a list of queries is provided, returns a list of lists of dicts. Each result is a dict with keys `content`, `score` and `rank`. Individual results are always in the format: ```python3 {"content": "text of the relevant passage", "score": 0.123456, "rank": 1} ```
rerank
python
AnswerDotAI/RAGatouille
ragatouille/RAGPretrainedModel.py
https://github.com/AnswerDotAI/RAGatouille/blob/master/ragatouille/RAGPretrainedModel.py
Apache-2.0
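A usage sketch for in-memory reranking with the method above; the candidate passages are toy placeholders, and no index is required.

from ragatouille import RAGPretrainedModel

RAG = RAGPretrainedModel.from_pretrained("colbert-ir/colbertv2.0")

candidates = [
    "Miyazaki directed Spirited Away.",
    "Toei Animation is a Japanese animation studio.",
    "The Ghibli Museum is located in Mitaka.",
]
reranked = RAG.rerank(query="who directed spirited away?", documents=candidates, k=3)
for r in reranked:
    print(r["rank"], round(r["score"], 3), r["content"])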
def encode( self, documents: list[str], bsize: Union[Literal["auto"], int] = "auto", document_metadatas: Optional[list[dict]] = None, verbose: bool = True, max_document_length: Union[Literal["auto"], int] = "auto", ): """Encode documents in memory to be searched through with no Index. Performance degrades rapidly with more documents. Parameters: documents (list[str]): The documents to encode. bsize (int): The batch size to use for encoding. document_metadatas (Optional[list[dict]]): An optional list of metadata dicts. Each entry must correspond to a document. """ if verbose: print(f"Encoding {len(documents)} documents...") self.model.encode( documents=documents, bsize=bsize, document_metadatas=document_metadatas, verbose=verbose, max_tokens=max_document_length, ) if verbose: print("Documents encoded!")
Encode documents in memory to be searched through with no Index. Performance degrades rapidly with more documents. Parameters: documents (list[str]): The documents to encode. bsize (int): The batch size to use for encoding. document_metadatas (Optional[list[dict]]): An optional list of metadata dicts. Each entry must correspond to a document.
encode
python
AnswerDotAI/RAGatouille
ragatouille/RAGPretrainedModel.py
https://github.com/AnswerDotAI/RAGatouille/blob/master/ragatouille/RAGPretrainedModel.py
Apache-2.0
def search_encoded_docs( self, query: Union[str, list[str]], k: int = 10, bsize: int = 32, ) -> list[dict[str, Any]]: """Search through documents encoded in-memory. Parameters: query (Union[str, list[str]]): The query or list of queries to search for. k (int): The number of results to return for each query. bsize (int): The batch size to use for searching. Returns: results (list[dict[str, Any]]): A list of dicts containing individual results for each query. If a list of queries is provided, returns a list of lists of dicts. """ return self.model.search_encoded_docs( queries=query, k=k, bsize=bsize, )
Search through documents encoded in-memory. Parameters: query (Union[str, list[str]]): The query or list of queries to search for. k (int): The number of results to return for each query. bsize (int): The batch size to use for searching. Returns: results (list[dict[str, Any]]): A list of dicts containing individual results for each query. If a list of queries is provided, returns a list of lists of dicts.
search_encoded_docs
python
AnswerDotAI/RAGatouille
ragatouille/RAGPretrainedModel.py
https://github.com/AnswerDotAI/RAGatouille/blob/master/ragatouille/RAGPretrainedModel.py
Apache-2.0
def clear_encoded_docs(self, force: bool = False): """Clear documents encoded in-memory. Parameters: force (bool): Whether to force the clearing of encoded documents without enforcing a 10s wait time. """ self.model.clear_encoded_docs(force=force)
Clear documents encoded in-memory. Parameters: force (bool): Whether to force the clearing of encoded documents without enforcing a 10s wait time.
clear_encoded_docs
python
AnswerDotAI/RAGatouille
ragatouille/RAGPretrainedModel.py
https://github.com/AnswerDotAI/RAGatouille/blob/master/ragatouille/RAGPretrainedModel.py
Apache-2.0
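The three in-memory methods above (`encode`, `search_encoded_docs`, `clear_encoded_docs`) are typically chained into one index-free workflow. A hedged sketch of that workflow; the checkpoint name, documents, and metadata are placeholders, not taken from the source.

```python3
# Sketch of the index-free, in-memory workflow built from the methods above.
from ragatouille import RAGPretrainedModel

RAG = RAGPretrainedModel.from_pretrained("colbert-ir/colbertv2.0")
RAG.encode(
    documents=["Paris is the capital of France.", "Tokyo is the capital of Japan."],
    document_metadatas=[{"lang": "en"}, {"lang": "en"}],
)
hits = RAG.search_encoded_docs(query="capital of France", k=1)
print(hits)
# Free the in-memory encodings when done; force=True skips the 10s safety wait.
RAG.clear_encoded_docs(force=True)
```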
def _should_rebuild(current_len: int, new_doc_len: int) -> bool: """ Heuristic to determine if it is more efficient to rebuild the index instead of updating it. """ return current_len + new_doc_len < 5000 or new_doc_len > current_len * 0.05
Heuristic to determine if it is more efficient to rebuild the index instead of updating it.
_should_rebuild
python
AnswerDotAI/RAGatouille
ragatouille/models/index.py
https://github.com/AnswerDotAI/RAGatouille/blob/master/ragatouille/models/index.py
Apache-2.0
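To make the `_should_rebuild` heuristic concrete: the index is rebuilt from scratch whenever the combined collection stays under 5000 documents, or whenever the incoming batch exceeds 5% of the existing collection; otherwise it is updated in place. A small self-contained check (the function body is restated verbatim so the asserts run standalone):

```python3
def _should_rebuild(current_len: int, new_doc_len: int) -> bool:
    # Restated from the snippet above for a standalone demonstration.
    return current_len + new_doc_len < 5000 or new_doc_len > current_len * 0.05

assert _should_rebuild(1_000, 100) is True        # small collection: always rebuild
assert _should_rebuild(100_000, 100) is False     # large collection, tiny batch: update
assert _should_rebuild(100_000, 10_000) is True   # new batch is >5% of collection: rebuild
```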
def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun, # noqa ) -> List[Document]: """Get documents relevant to a query.""" docs = self.model.search(query, **self.kwargs) return [ Document( page_content=doc["content"], metadata=doc.get("document_metadata", {}) ) for doc in docs ]
Get documents relevant to a query.
_get_relevant_documents
python
AnswerDotAI/RAGatouille
ragatouille/integrations/_langchain.py
https://github.com/AnswerDotAI/RAGatouille/blob/master/ragatouille/integrations/_langchain.py
Apache-2.0
def compress_documents( self, documents: Sequence[Document], query: str, callbacks: Optional[Callbacks] = None, # noqa **kwargs, ) -> Any: """Rerank a list of documents relevant to a query.""" doc_list = list(documents) _docs = [d.page_content for d in doc_list] results = self.model.rerank( query=query, documents=_docs, k=kwargs.get("k", self.k), **self.kwargs, ) final_results = [] for r in results: doc = doc_list[r["result_index"]] doc.metadata["relevance_score"] = r["score"] final_results.append(doc) return final_results
Rerank a list of documents relevant to a query.
compress_documents
python
AnswerDotAI/RAGatouille
ragatouille/integrations/_langchain.py
https://github.com/AnswerDotAI/RAGatouille/blob/master/ragatouille/integrations/_langchain.py
Apache-2.0
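The two LangChain adapters above (`_get_relevant_documents` on the retriever, `compress_documents` on the compressor) are normally obtained from a `RAGPretrainedModel` rather than built by hand. A hedged wiring sketch; the `from_index`, `as_langchain_retriever`, and `as_langchain_document_compressor` helpers and the index path are assumptions about RAGatouille's public API, not shown in the snippets above.

```python3
# Hypothetical wiring of the LangChain integration; helper names are assumptions.
from ragatouille import RAGPretrainedModel

RAG = RAGPretrainedModel.from_index(".ragatouille/colbert/indexes/my_index")
retriever = RAG.as_langchain_retriever(k=10)            # dispatches to _get_relevant_documents
compressor = RAG.as_langchain_document_compressor(k=5)  # dispatches to compress_documents

question = "What animation studio did Miyazaki found?"
docs = retriever.invoke(question)
reranked = compressor.compress_documents(documents=docs, query=question)
for doc in reranked:
    print(doc.metadata["relevance_score"], doc.page_content[:80])
```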
def _make_individual_triplets(self, query, positives, negatives): """Create the training data in ColBERT(v1) format from raw lists of triplets""" if len(positives) == 0 or len(negatives) == 0: return [] triplets = [] q = self.query_map[query] random.seed(42) if len(positives) > 1: all_pos_texts = [p for p in positives] max_triplets_per_query = 20 negs_per_positive = max(1, max_triplets_per_query // len(all_pos_texts)) initial_triplets_count = 0 for pos in all_pos_texts: p = self.passage_map[pos] chosen_negs = random.sample( negatives, min(len(negatives), negs_per_positive) ) for neg in chosen_negs: n = self.passage_map[neg] initial_triplets_count += 1 triplets.append([q, p, n]) extra_triplets_needed = max_triplets_per_query - initial_triplets_count if extra_triplets_needed > 0: all_combinations = list(product(all_pos_texts, negatives)) random.seed(42) random.shuffle(all_combinations) for pos, neg in all_combinations: p = self.passage_map[pos] n = self.passage_map[neg] if [q, p, n] not in triplets: triplets.append([q, p, n]) extra_triplets_needed -= 1 if extra_triplets_needed <= 0: break else: p = self.passage_map[positives[0]] for n in negatives: triplets.append([q, p, self.passage_map[n]]) return triplets
Create the training data in ColBERT(v1) format from raw lists of triplets
_make_individual_triplets
python
AnswerDotAI/RAGatouille
ragatouille/data/training_data_processor.py
https://github.com/AnswerDotAI/RAGatouille/blob/master/ragatouille/data/training_data_processor.py
Apache-2.0
def _get_new_negatives(self, query, passages, mine_hard_negatives, n_new_negatives): """Generate new negatives for each query, using either: - The assigned hard negative miner if mine_hard_negatives is True - Randomly sampling from the full collection otherwise """ if mine_hard_negatives: hard_negatives = self.negative_miner.mine_hard_negatives(query) candidates = [ x for x in hard_negatives if x not in passages["positives"] and x not in passages["negatives"] ] new_negatives = random.sample( candidates, min(n_new_negatives, len(candidates)), ) else: new_negatives = [ x for x in random.sample( self.collection, min(n_new_negatives, len(self.collection)) ) if x not in passages["positives"] and x not in passages["negatives"] ] return new_negatives
Generate new negatives for each query, using either: - The assigned hard negative miner if mine_hard_negatives is True - Randomly sampling from the full collection otherwise
_get_new_negatives
python
AnswerDotAI/RAGatouille
ragatouille/data/training_data_processor.py
https://github.com/AnswerDotAI/RAGatouille/blob/master/ragatouille/data/training_data_processor.py
Apache-2.0
def _process_raw_pairs(self, raw_data, mine_hard_negatives, n_new_negatives): """Convert unlabeled pairs into training triplets. It's assumed unlabeled pairs are always in the format (query, relevant_passage)""" training_triplets = [] raw_grouped_triplets = defaultdict(lambda: defaultdict(list)) for query, positive in raw_data: if isinstance(positive, str): positive = [positive] elif isinstance(positive, dict): positive = [positive["content"]] raw_grouped_triplets[query]["positives"] += positive for query, passages in raw_grouped_triplets.items(): if n_new_negatives > 0: passages["negatives"] += self._get_new_negatives( query=query, passages=passages, mine_hard_negatives=mine_hard_negatives, n_new_negatives=n_new_negatives, ) training_triplets += self._make_individual_triplets( query=query, positives=list(set(passages["positives"])), negatives=list(set(passages["negatives"])), ) self.training_triplets = training_triplets
Convert unlabeled pairs into training triplets. It's assumed unlabeled pairs are always in the format (query, relevant_passage)
_process_raw_pairs
python
AnswerDotAI/RAGatouille
ragatouille/data/training_data_processor.py
https://github.com/AnswerDotAI/RAGatouille/blob/master/ragatouille/data/training_data_processor.py
Apache-2.0
def _process_raw_labeled_pairs( self, raw_data, mine_hard_negatives, n_new_negatives, positive_label, negative_label, ): """ Convert labeled pairs intro training triplets. Labeled pairs are in the format (query, passage, label) """ training_triplets = [] raw_grouped_triplets = defaultdict(lambda: defaultdict(list)) for query, passage, label in raw_data: if isinstance(passage, str): passage = [passage] if label == positive_label: label = "positives" elif label == negative_label: label = "negatives" else: raise ValueError( f"Label {label} must correspond to either positive_label or negative_label!" ) raw_grouped_triplets[query][label] += passage for query, passages in raw_grouped_triplets.items(): if n_new_negatives > 0: passages["negatives"] += self._get_new_negatives( query=query, passages=passages, mine_hard_negatives=mine_hard_negatives, n_new_negatives=n_new_negatives, ) training_triplets += self._make_individual_triplets( query=query, positives=passages["positives"], negatives=passages["negatives"], ) self.training_triplets = training_triplets
Convert labeled pairs into training triplets. Labeled pairs are in the format (query, passage, label)
_process_raw_labeled_pairs
python
AnswerDotAI/RAGatouille
ragatouille/data/training_data_processor.py
https://github.com/AnswerDotAI/RAGatouille/blob/master/ragatouille/data/training_data_processor.py
Apache-2.0
def _process_raw_triplets(self, raw_data, mine_hard_negatives, n_new_negatives): """ Convert raw triplets (query, positives : str | list[str], negatives: str | list[str]) into training triplets. """ training_triplets = [] raw_grouped_triplets = defaultdict(lambda: defaultdict(list)) for query, positive, negative in raw_data: if isinstance(positive, str): positive = [positive] if isinstance(negative, str): negative = [negative] raw_grouped_triplets[query]["positives"] += positive raw_grouped_triplets[query]["negatives"] += negative for query, passages in raw_grouped_triplets.items(): if n_new_negatives > 0: passages["negatives"] += self._get_new_negatives( query=query, passages=passages, mine_hard_negatives=mine_hard_negatives, n_new_negatives=n_new_negatives, ) training_triplets += self._make_individual_triplets( query=query, positives=passages["positives"], negatives=passages["negatives"], ) self.training_triplets = training_triplets
Convert raw triplets (query, positives : str | list[str], negatives: str | list[str]) into training triplets.
_process_raw_triplets
python
AnswerDotAI/RAGatouille
ragatouille/data/training_data_processor.py
https://github.com/AnswerDotAI/RAGatouille/blob/master/ragatouille/data/training_data_processor.py
Apache-2.0
def _make_data_map(self): """ Generate a query_text: query_id and passage_text: passage_id mapping To easily generate ColBERT-format training data. """ self.query_map = {} self.passage_map = {} for i, query in enumerate(self.queries): self.query_map[query] = i for i, passage in enumerate(list(self.collection)): self.passage_map[passage] = i
Generate a query_text: query_id and passage_text: passage_id mapping to easily generate ColBERT-format training data.
_make_data_map
python
AnswerDotAI/RAGatouille
ragatouille/data/training_data_processor.py
https://github.com/AnswerDotAI/RAGatouille/blob/master/ragatouille/data/training_data_processor.py
Apache-2.0
def export_training_data(self, path: Union[str, Path]): """ Export training data for both training and versioning purposes. {path} should ideally be dvc versioned. """ path = Path(path) # Create the directory if it does not exist os.makedirs(path, exist_ok=True) with open(path / "queries.train.colbert.tsv", "w") as f: for query, idx in self.query_map.items(): query = query.replace("\t", " ").replace("\n", " ") f.write(f"{idx}\t{query}\n") with open(path / "corpus.train.colbert.tsv", "w") as f: for document, idx in self.passage_map.items(): document = document.replace("\t", " ").replace("\n", " ") f.write(f"{idx}\t{document}\n") random.seed(42) random.shuffle(self.training_triplets) srsly.write_jsonl(path / "triples.train.colbert.jsonl", self.training_triplets)
Export training data for both training and versioning purposes. {path} should ideally be DVC-versioned.
export_training_data
python
AnswerDotAI/RAGatouille
ragatouille/data/training_data_processor.py
https://github.com/AnswerDotAI/RAGatouille/blob/master/ragatouille/data/training_data_processor.py
Apache-2.0
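For reference, `export_training_data` writes three files: `queries.train.colbert.tsv` and `corpus.train.colbert.tsv` as `<id>\t<text>` lines, and `triples.train.colbert.jsonl` with `[query_id, positive_id, negative_id]` rows. A minimal sketch for reading them back; the `data` directory is a placeholder for whatever export path was used.

```python3
# Read back the exported ColBERT training data; file names match those written above.
from pathlib import Path
import srsly

data_dir = Path("data")  # placeholder for the path passed to export_training_data

id_to_query = {}
with open(data_dir / "queries.train.colbert.tsv") as f:
    for line in f:
        idx, text = line.rstrip("\n").split("\t", 1)
        id_to_query[int(idx)] = text

# Each triple is a [query_id, positive_id, negative_id] list of integer ids.
for q_id, pos_id, neg_id in srsly.read_jsonl(data_dir / "triples.train.colbert.jsonl"):
    print(id_to_query[q_id], pos_id, neg_id)
    break
```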
def _batch_mine( self, queries: list[str], ): """Separate function to parallelise later on""" print(f"Retrieving hard negatives for {len(queries)} queries...") results = [] print("Embedding queries...") query_embeddings = self.model.encode(queries, show_progress_bar=True) print("Retrieving hard negatives...") for q_emb in tqdm(query_embeddings): query_results = self.query_index(q_emb, top_k=self.max_rank) query_results = query_results[self.min_rank : self.max_rank] query_results = [self.corpus_map[x.id] for x in query_results] results.append(query_results) print(f"""Done generating hard negatives.""") return results
Separate function to parallelise later on
_batch_mine
python
AnswerDotAI/RAGatouille
ragatouille/negative_miners/simpleminer.py
https://github.com/AnswerDotAI/RAGatouille/blob/master/ragatouille/negative_miners/simpleminer.py
Apache-2.0
def get_co_embeddings(self, word_embs, pos_ohot, cap_lens, motions, m_lens): with torch.no_grad(): word_embs = word_embs.detach().to(self.device).float() pos_ohot = pos_ohot.detach().to(self.device).float() motions = motions.detach().to(self.device).float() align_idx = np.argsort(m_lens.data.tolist())[::-1].copy() motions = motions[align_idx] m_lens = m_lens[align_idx] '''Movement Encoding''' movements = self.movement_encoder(motions[..., :-4]).detach() m_lens = m_lens // self.opt.unit_length motion_embedding = self.motion_encoder(movements, m_lens) '''Text Encoding''' text_embedding = self.text_encoder(word_embs, pos_ohot, cap_lens) text_embedding = text_embedding[align_idx] return text_embedding, motion_embedding
Movement Encoding
get_co_embeddings
python
EricGuo5513/momask-codes
models/t2m_eval_wrapper.py
https://github.com/EricGuo5513/momask-codes/blob/master/models/t2m_eval_wrapper.py
MIT
def get_motion_embeddings(self, motions, m_lens): with torch.no_grad(): motions = motions.detach().to(self.device).float() align_idx = np.argsort(m_lens.data.tolist())[::-1].copy() motions = motions[align_idx] m_lens = m_lens[align_idx] '''Movement Encoding''' movements = self.movement_encoder(motions[..., :-4]).detach() m_lens = m_lens // self.opt.unit_length motion_embedding = self.motion_encoder(movements, m_lens) return motion_embedding
Movement Encoding
get_motion_embeddings
python
EricGuo5513/momask-codes
models/t2m_eval_wrapper.py
https://github.com/EricGuo5513/momask-codes/blob/master/models/t2m_eval_wrapper.py
MIT
def get_co_embeddings(self, word_embs, pos_ohot, cap_lens, motions, m_lens): with torch.no_grad(): word_embs = word_embs.detach().to(self.device).float() pos_ohot = pos_ohot.detach().to(self.device).float() motions = motions.detach().to(self.device).float() align_idx = np.argsort(m_lens.data.tolist())[::-1].copy() motions = motions[align_idx] m_lens = m_lens[align_idx] '''Movement Encoding''' movements = self.movement_encoder(motions[..., :-4]).detach() m_lens = m_lens // self.opt['unit_length'] motion_embedding = self.motion_encoder(movements, m_lens) # print(motions.shape, movements.shape, motion_embedding.shape, m_lens) '''Text Encoding''' text_embedding = self.text_encoder(word_embs, pos_ohot, cap_lens) text_embedding = text_embedding[align_idx] return text_embedding, motion_embedding
Movement Encoding
get_co_embeddings
python
EricGuo5513/momask-codes
models/t2m_eval_wrapper.py
https://github.com/EricGuo5513/momask-codes/blob/master/models/t2m_eval_wrapper.py
MIT
def get_motion_embeddings(self, motions, m_lens): with torch.no_grad(): motions = motions.detach().to(self.device).float() align_idx = np.argsort(m_lens.data.tolist())[::-1].copy() motions = motions[align_idx] m_lens = m_lens[align_idx] '''Movement Encoding''' movements = self.movement_encoder(motions[..., :-4]).detach() m_lens = m_lens // self.opt['unit_length'] motion_embedding = self.motion_encoder(movements, m_lens) return motion_embedding
Movement Encoding
get_motion_embeddings
python
EricGuo5513/momask-codes
models/t2m_eval_wrapper.py
https://github.com/EricGuo5513/momask-codes/blob/master/models/t2m_eval_wrapper.py
MIT
def cal_loss(pred, labels, ignore_index=None, smoothing=0.): '''Calculate cross entropy loss, apply label smoothing if needed.''' # print(pred.shape, labels.shape) #torch.Size([64, 1028, 55]) torch.Size([64, 55]) # print(pred.shape, labels.shape) #torch.Size([64, 1027, 55]) torch.Size([64, 55]) if smoothing: space = 2 n_class = pred.size(1) mask = labels.ne(ignore_index) one_hot = rearrange(F.one_hot(labels, n_class + space), 'a ... b -> a b ...')[:, :n_class] # one_hot = torch.zeros_like(pred).scatter(1, labels.unsqueeze(1), 1) sm_one_hot = one_hot * (1 - smoothing) + (1 - one_hot) * smoothing / (n_class - 1) neg_log_prb = -F.log_softmax(pred, dim=1) loss = (sm_one_hot * neg_log_prb).sum(dim=1) # loss = F.cross_entropy(pred, sm_one_hot, reduction='none') loss = torch.mean(loss.masked_select(mask)) else: loss = F.cross_entropy(pred, labels, ignore_index=ignore_index) return loss
Calculate cross entropy loss, apply label smoothing if needed.
cal_loss
python
EricGuo5513/momask-codes
models/mask_transformer/tools.py
https://github.com/EricGuo5513/momask-codes/blob/master/models/mask_transformer/tools.py
MIT
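A hedged sketch of calling `cal_loss` with label smoothing, assuming the momask-codes repository is on `PYTHONPATH` so `models.mask_transformer.tools` is importable; the codebook size, sequence length, and choice of ignore index are illustrative assumptions that mimic the shapes in the function's own debug comments.

```python3
# Hypothetical usage; shapes follow the (batch, n_class, seq_len) / (batch, seq_len)
# convention shown in the commented print statements inside cal_loss.
import torch
from models.mask_transformer.tools import cal_loss  # assumes the repo is importable

n_class, ignore_index = 512, 512                # e.g. codebook size; mask id used as ignore index
pred = torch.randn(4, n_class, 55)              # logits over token classes
labels = torch.randint(0, n_class, (4, 55))
labels[:, -5:] = ignore_index                   # pretend trailing positions are masked out

loss = cal_loss(pred, labels, ignore_index=ignore_index, smoothing=0.1)
print(loss.item())
```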
def __init__(self, code_dim, cond_mode, latent_dim=256, ff_size=1024, num_layers=8, num_heads=4, dropout=0.1, clip_dim=512, cond_drop_prob=0.1, clip_version=None, opt=None, **kargs): super(MaskTransformer, self).__init__() print(f'latent_dim: {latent_dim}, ff_size: {ff_size}, nlayers: {num_layers}, nheads: {num_heads}, dropout: {dropout}') self.code_dim = code_dim self.latent_dim = latent_dim self.clip_dim = clip_dim self.dropout = dropout self.opt = opt self.cond_mode = cond_mode self.cond_drop_prob = cond_drop_prob if self.cond_mode == 'action': assert 'num_actions' in kargs self.num_actions = kargs.get('num_actions', 1) ''' Preparing Networks ''' self.input_process = InputProcess(self.code_dim, self.latent_dim) self.position_enc = PositionalEncoding(self.latent_dim, self.dropout) seqTransEncoderLayer = nn.TransformerEncoderLayer(d_model=self.latent_dim, nhead=num_heads, dim_feedforward=ff_size, dropout=dropout, activation='gelu') self.seqTransEncoder = nn.TransformerEncoder(seqTransEncoderLayer, num_layers=num_layers) self.encode_action = partial(F.one_hot, num_classes=self.num_actions) # if self.cond_mode != 'no_cond': if self.cond_mode == 'text': self.cond_emb = nn.Linear(self.clip_dim, self.latent_dim) elif self.cond_mode == 'action': self.cond_emb = nn.Linear(self.num_actions, self.latent_dim) elif self.cond_mode == 'uncond': self.cond_emb = nn.Identity() else: raise KeyError("Unsupported condition mode!!!") _num_tokens = opt.num_tokens + 2 # two dummy tokens, one for masking, one for padding self.mask_id = opt.num_tokens self.pad_id = opt.num_tokens + 1 self.output_process = OutputProcess_Bert(out_feats=opt.num_tokens, latent_dim=latent_dim) self.token_emb = nn.Embedding(_num_tokens, self.code_dim) self.apply(self.__init_weights) ''' Preparing frozen weights ''' if self.cond_mode == 'text': print('Loading CLIP...') self.clip_version = clip_version self.clip_model = self.load_and_freeze_clip(clip_version) self.noise_schedule = cosine_schedule
Preparing Networks
__init__
python
EricGuo5513/momask-codes
models/mask_transformer/transformer.py
https://github.com/EricGuo5513/momask-codes/blob/master/models/mask_transformer/transformer.py
MIT
def load_and_freeze_token_emb(self, codebook): ''' :param codebook: (c, d) :return: ''' assert self.training, 'Only necessary in training mode' c, d = codebook.shape self.token_emb.weight = nn.Parameter(torch.cat([codebook, torch.zeros(size=(2, d), device=codebook.device)], dim=0)) #add two dummy tokens, 0 vectors self.token_emb.requires_grad_(False) # self.token_emb.weight.requires_grad = False # self.token_emb_ready = True print("Token embedding initialized!")
:param codebook: (c, d) :return:
load_and_freeze_token_emb
python
EricGuo5513/momask-codes
models/mask_transformer/transformer.py
https://github.com/EricGuo5513/momask-codes/blob/master/models/mask_transformer/transformer.py
MIT
def trans_forward(self, motion_ids, cond, padding_mask, force_mask=False): ''' :param motion_ids: (b, seqlen) :padding_mask: (b, seqlen), all pad positions are TRUE else FALSE :param cond: (b, embed_dim) for text, (b, num_actions) for action :param force_mask: boolean :return: -logits: (b, num_token, seqlen) ''' cond = self.mask_cond(cond, force_mask=force_mask) # print(motion_ids.shape) x = self.token_emb(motion_ids) # print(x.shape) # (b, seqlen, d) -> (seqlen, b, latent_dim) x = self.input_process(x) cond = self.cond_emb(cond).unsqueeze(0) #(1, b, latent_dim) x = self.position_enc(x) xseq = torch.cat([cond, x], dim=0) #(seqlen+1, b, latent_dim) padding_mask = torch.cat([torch.zeros_like(padding_mask[:, 0:1]), padding_mask], dim=1) #(b, seqlen+1) # print(xseq.shape, padding_mask.shape) # print(padding_mask.shape, xseq.shape) output = self.seqTransEncoder(xseq, src_key_padding_mask=padding_mask)[1:] #(seqlen, b, e) logits = self.output_process(output) #(seqlen, b, e) -> (b, ntoken, seqlen) return logits
:param motion_ids: (b, seqlen) :padding_mask: (b, seqlen), all pad positions are TRUE else FALSE :param cond: (b, embed_dim) for text, (b, num_actions) for action :param force_mask: boolean :return: -logits: (b, num_token, seqlen)
trans_forward
python
EricGuo5513/momask-codes
models/mask_transformer/transformer.py
https://github.com/EricGuo5513/momask-codes/blob/master/models/mask_transformer/transformer.py
MIT
def forward(self, ids, y, m_lens): ''' :param ids: (b, n) :param y: raw text for cond_mode=text, (b, ) for cond_mode=action :m_lens: (b,) :return: ''' bs, ntokens = ids.shape device = ids.device # Positions that are PADDED are ALL FALSE non_pad_mask = lengths_to_mask(m_lens, ntokens) #(b, n) ids = torch.where(non_pad_mask, ids, self.pad_id) force_mask = False if self.cond_mode == 'text': with torch.no_grad(): cond_vector = self.encode_text(y) elif self.cond_mode == 'action': cond_vector = self.enc_action(y).to(device).float() elif self.cond_mode == 'uncond': cond_vector = torch.zeros(bs, self.latent_dim).float().to(device) force_mask = True else: raise NotImplementedError("Unsupported condition mode!!!") ''' Prepare mask ''' rand_time = uniform((bs,), device=device) rand_mask_probs = self.noise_schedule(rand_time) num_token_masked = (ntokens * rand_mask_probs).round().clamp(min=1) batch_randperm = torch.rand((bs, ntokens), device=device).argsort(dim=-1) # Positions to be MASKED are ALL TRUE mask = batch_randperm < num_token_masked.unsqueeze(-1) # Positions to be MASKED must also be NON-PADDED mask &= non_pad_mask # Note this is our training target, not input labels = torch.where(mask, ids, self.mask_id) x_ids = ids.clone() # Further Apply Bert Masking Scheme # Step 1: 10% replace with an incorrect token mask_rid = get_mask_subset_prob(mask, 0.1) rand_id = torch.randint_like(x_ids, high=self.opt.num_tokens) x_ids = torch.where(mask_rid, rand_id, x_ids) # Step 2: 90% x 10% replace with correct token, and 90% x 88% replace with mask token mask_mid = get_mask_subset_prob(mask & ~mask_rid, 0.88) # mask_mid = mask x_ids = torch.where(mask_mid, self.mask_id, x_ids) logits = self.trans_forward(x_ids, cond_vector, ~non_pad_mask, force_mask) ce_loss, pred_id, acc = cal_performance(logits, labels, ignore_index=self.mask_id) return ce_loss, pred_id, acc
:param ids: (b, n) :param y: raw text for cond_mode=text, (b, ) for cond_mode=action :m_lens: (b,) :return:
forward
python
EricGuo5513/momask-codes
models/mask_transformer/transformer.py
https://github.com/EricGuo5513/momask-codes/blob/master/models/mask_transformer/transformer.py
MIT
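The BERT-style corruption in `forward` touches only the positions selected for masking, and its two `get_mask_subset_prob` calls imply fixed expected fractions: roughly 10% of masked positions receive a random wrong token, about 79% receive the mask token, and the remaining ~11% keep their original token while still being prediction targets. A small arithmetic check of those fractions:

```python3
# Expected corruption fractions over MASKED positions, derived from the
# get_mask_subset_prob(…, 0.1) and get_mask_subset_prob(…, 0.88) calls above.
p_random = 0.10               # step 1: replaced with a random (incorrect) token id
p_mask   = (1 - 0.10) * 0.88  # step 2: replaced with self.mask_id  -> 0.792
p_keep   = (1 - 0.10) * 0.12  # remainder: original token kept, still predicted
assert abs(p_random + p_mask + p_keep - 1.0) < 1e-9
print(f"random={p_random:.3f}, mask={p_mask:.3f}, keep={p_keep:.3f}")
```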
def generate(self, conds, m_lens, timesteps: int, cond_scale: int, temperature=1, topk_filter_thres=0.9, gsample=False, force_mask=False ): # print(self.opt.num_quantizers) # assert len(timesteps) >= len(cond_scales) == self.opt.num_quantizers device = next(self.parameters()).device seq_len = max(m_lens) batch_size = len(m_lens) if self.cond_mode == 'text': with torch.no_grad(): cond_vector = self.encode_text(conds) elif self.cond_mode == 'action': cond_vector = self.enc_action(conds).to(device) elif self.cond_mode == 'uncond': cond_vector = torch.zeros(batch_size, self.latent_dim).float().to(device) else: raise NotImplementedError("Unsupported condition mode!!!") padding_mask = ~lengths_to_mask(m_lens, seq_len) # print(padding_mask.shape, ) # Start from all tokens being masked ids = torch.where(padding_mask, self.pad_id, self.mask_id) scores = torch.where(padding_mask, 1e5, 0.) starting_temperature = temperature for timestep, steps_until_x0 in zip(torch.linspace(0, 1, timesteps, device=device), reversed(range(timesteps))): # 0 < timestep < 1 rand_mask_prob = self.noise_schedule(timestep) # Tensor ''' Maskout, and cope with variable length ''' # fix: the ratio regarding lengths, instead of seq_len num_token_masked = torch.round(rand_mask_prob * m_lens).clamp(min=1) # (b, ) # select num_token_masked tokens with lowest scores to be masked sorted_indices = scores.argsort( dim=1) # (b, k), sorted_indices[i, j] = the index of j-th lowest element in scores on dim=1 ranks = sorted_indices.argsort(dim=1) # (b, k), rank[i, j] = the rank (0: lowest) of scores[i, j] on dim=1 is_mask = (ranks < num_token_masked.unsqueeze(-1)) ids = torch.where(is_mask, self.mask_id, ids) ''' Preparing input ''' # (b, num_token, seqlen) logits = self.forward_with_cond_scale(ids, cond_vector=cond_vector, padding_mask=padding_mask, cond_scale=cond_scale, force_mask=force_mask) logits = logits.permute(0, 2, 1) # (b, seqlen, ntoken) # print(logits.shape, self.opt.num_tokens) # clean low prob token filtered_logits = top_k(logits, topk_filter_thres, dim=-1) ''' Update ids ''' # if force_mask: temperature = starting_temperature # else: # temperature = starting_temperature * (steps_until_x0 / timesteps) # temperature = max(temperature, 1e-4) # print(filtered_logits.shape) # temperature is annealed, gradually reducing temperature as well as randomness if gsample: # use gumbel_softmax sampling # print("1111") pred_ids = gumbel_sample(filtered_logits, temperature=temperature, dim=-1) # (b, seqlen) else: # use multinomial sampling # print("2222") probs = F.softmax(filtered_logits / temperature, dim=-1) # (b, seqlen, ntoken) # print(temperature, starting_temperature, steps_until_x0, timesteps) # print(probs / temperature) pred_ids = Categorical(probs).sample() # (b, seqlen) # print(pred_ids.max(), pred_ids.min()) # if pred_ids. ids = torch.where(is_mask, pred_ids, ids) ''' Updating scores ''' probs_without_temperature = logits.softmax(dim=-1) # (b, seqlen, ntoken) scores = probs_without_temperature.gather(2, pred_ids.unsqueeze(dim=-1)) # (b, seqlen, 1) scores = scores.squeeze(-1) # (b, seqlen) # We do not want to re-mask the previously kept tokens, or pad tokens scores = scores.masked_fill(~is_mask, 1e5) ids = torch.where(padding_mask, -1, ids) # print("Final", ids.max(), ids.min()) return ids
Maskout, and cope with variable length
generate
python
EricGuo5513/momask-codes
models/mask_transformer/transformer.py
https://github.com/EricGuo5513/momask-codes/blob/master/models/mask_transformer/transformer.py
MIT
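`generate` starts from a fully masked sequence and, at every step, re-masks only the lowest-confidence tokens, with the count shrinking according to `self.noise_schedule`. A standalone sketch of that annealing for a single sequence; the `cos(t·π/2)` form is an assumption about `cosine_schedule`, which is defined elsewhere in the repo rather than in the snippet above.

```python3
# Illustration of how many tokens generate() re-masks at each iteration.
import math

def cosine_schedule(t: float) -> float:
    # Assumed form of the noise schedule; not shown in the snippet above.
    return math.cos(t * math.pi / 2)

timesteps, motion_len = 10, 49
for step in range(timesteps):
    t = step / (timesteps - 1)                      # mirrors torch.linspace(0, 1, timesteps)
    num_masked = max(1, round(cosine_schedule(t) * motion_len))
    print(f"step {step}: re-mask the {num_masked} lowest-confidence tokens")
```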
def edit(self, conds, tokens, m_lens, timesteps: int, cond_scale: int, temperature=1, topk_filter_thres=0.9, gsample=False, force_mask=False, edit_mask=None, padding_mask=None, ): assert edit_mask.shape == tokens.shape if edit_mask is not None else True device = next(self.parameters()).device seq_len = tokens.shape[1] if self.cond_mode == 'text': with torch.no_grad(): cond_vector = self.encode_text(conds) elif self.cond_mode == 'action': cond_vector = self.enc_action(conds).to(device) elif self.cond_mode == 'uncond': cond_vector = torch.zeros(1, self.latent_dim).float().to(device) else: raise NotImplementedError("Unsupported condition mode!!!") if padding_mask == None: padding_mask = ~lengths_to_mask(m_lens, seq_len) # Start from all tokens being masked if edit_mask == None: mask_free = True ids = torch.where(padding_mask, self.pad_id, tokens) edit_mask = torch.ones_like(padding_mask) edit_mask = edit_mask & ~padding_mask edit_len = edit_mask.sum(dim=-1) scores = torch.where(edit_mask, 0., 1e5) else: mask_free = False edit_mask = edit_mask & ~padding_mask edit_len = edit_mask.sum(dim=-1) ids = torch.where(edit_mask, self.mask_id, tokens) scores = torch.where(edit_mask, 0., 1e5) starting_temperature = temperature for timestep, steps_until_x0 in zip(torch.linspace(0, 1, timesteps, device=device), reversed(range(timesteps))): # 0 < timestep < 1 rand_mask_prob = 0.16 if mask_free else self.noise_schedule(timestep) # Tensor ''' Maskout, and cope with variable length ''' # fix: the ratio regarding lengths, instead of seq_len num_token_masked = torch.round(rand_mask_prob * edit_len).clamp(min=1) # (b, ) # select num_token_masked tokens with lowest scores to be masked sorted_indices = scores.argsort( dim=1) # (b, k), sorted_indices[i, j] = the index of j-th lowest element in scores on dim=1 ranks = sorted_indices.argsort(dim=1) # (b, k), rank[i, j] = the rank (0: lowest) of scores[i, j] on dim=1 is_mask = (ranks < num_token_masked.unsqueeze(-1)) # is_mask = (torch.rand_like(scores) < 0.8) * ~padding_mask if mask_free else is_mask ids = torch.where(is_mask, self.mask_id, ids) ''' Preparing input ''' # (b, num_token, seqlen) logits = self.forward_with_cond_scale(ids, cond_vector=cond_vector, padding_mask=padding_mask, cond_scale=cond_scale, force_mask=force_mask) logits = logits.permute(0, 2, 1) # (b, seqlen, ntoken) # print(logits.shape, self.opt.num_tokens) # clean low prob token filtered_logits = top_k(logits, topk_filter_thres, dim=-1) ''' Update ids ''' # if force_mask: temperature = starting_temperature # else: # temperature = starting_temperature * (steps_until_x0 / timesteps) # temperature = max(temperature, 1e-4) # print(filtered_logits.shape) # temperature is annealed, gradually reducing temperature as well as randomness if gsample: # use gumbel_softmax sampling # print("1111") pred_ids = gumbel_sample(filtered_logits, temperature=temperature, dim=-1) # (b, seqlen) else: # use multinomial sampling # print("2222") probs = F.softmax(filtered_logits / temperature, dim=-1) # (b, seqlen, ntoken) # print(temperature, starting_temperature, steps_until_x0, timesteps) # print(probs / temperature) pred_ids = Categorical(probs).sample() # (b, seqlen) # print(pred_ids.max(), pred_ids.min()) # if pred_ids. ids = torch.where(is_mask, pred_ids, ids) ''' Updating scores ''' probs_without_temperature = logits.softmax(dim=-1) # (b, seqlen, ntoken) scores = probs_without_temperature.gather(2, pred_ids.unsqueeze(dim=-1)) # (b, seqlen, 1) scores = scores.squeeze(-1) # (b, seqlen) # We do not want to re-mask the previously kept tokens, or pad tokens scores = scores.masked_fill(~edit_mask, 1e5) if mask_free else scores.masked_fill(~is_mask, 1e5) ids = torch.where(padding_mask, -1, ids) # print("Final", ids.max(), ids.min()) return ids
Maskout, and cope with variable length
edit
python
EricGuo5513/momask-codes
models/mask_transformer/transformer.py
https://github.com/EricGuo5513/momask-codes/blob/master/models/mask_transformer/transformer.py
MIT
def edit_beta(self, conds, conds_og, tokens, m_lens, cond_scale: int, force_mask=False, ): device = next(self.parameters()).device seq_len = tokens.shape[1] if self.cond_mode == 'text': with torch.no_grad(): cond_vector = self.encode_text(conds) if conds_og is not None: cond_vector_og = self.encode_text(conds_og) else: cond_vector_og = None elif self.cond_mode == 'action': cond_vector = self.enc_action(conds).to(device) if conds_og is not None: cond_vector_og = self.enc_action(conds_og).to(device) else: cond_vector_og = None else: raise NotImplementedError("Unsupported condition mode!!!") padding_mask = ~lengths_to_mask(m_lens, seq_len) # Start from all tokens being masked ids = torch.where(padding_mask, self.pad_id, tokens) # Do not mask anything ''' Preparing input ''' # (b, num_token, seqlen) logits = self.forward_with_cond_scale(ids, cond_vector=cond_vector, cond_vector_neg=cond_vector_og, padding_mask=padding_mask, cond_scale=cond_scale, force_mask=force_mask) logits = logits.permute(0, 2, 1) # (b, seqlen, ntoken) ''' Updating scores ''' probs_without_temperature = logits.softmax(dim=-1) # (b, seqlen, ntoken) tokens[tokens == -1] = 0 # just to get through an error when index = -1 using gather og_tokens_scores = probs_without_temperature.gather(2, tokens.unsqueeze(dim=-1)) # (b, seqlen, 1) og_tokens_scores = og_tokens_scores.squeeze(-1) # (b, seqlen) return og_tokens_scores
Preparing input
edit_beta
python
EricGuo5513/momask-codes
models/mask_transformer/transformer.py
https://github.com/EricGuo5513/momask-codes/blob/master/models/mask_transformer/transformer.py
MIT
def __init__(self, code_dim, cond_mode, latent_dim=256, ff_size=1024, num_layers=8, cond_drop_prob=0.1, num_heads=4, dropout=0.1, clip_dim=512, shared_codebook=False, share_weight=False, clip_version=None, opt=None, **kargs): super(ResidualTransformer, self).__init__() print(f'latent_dim: {latent_dim}, ff_size: {ff_size}, nlayers: {num_layers}, nheads: {num_heads}, dropout: {dropout}') # assert shared_codebook == True, "Only support shared codebook right now!" self.code_dim = code_dim self.latent_dim = latent_dim self.clip_dim = clip_dim self.dropout = dropout self.opt = opt self.cond_mode = cond_mode # self.cond_drop_prob = cond_drop_prob if self.cond_mode == 'action': assert 'num_actions' in kargs self.num_actions = kargs.get('num_actions', 1) self.cond_drop_prob = cond_drop_prob ''' Preparing Networks ''' self.input_process = InputProcess(self.code_dim, self.latent_dim) self.position_enc = PositionalEncoding(self.latent_dim, self.dropout) seqTransEncoderLayer = nn.TransformerEncoderLayer(d_model=self.latent_dim, nhead=num_heads, dim_feedforward=ff_size, dropout=dropout, activation='gelu') self.seqTransEncoder = nn.TransformerEncoder(seqTransEncoderLayer, num_layers=num_layers) self.encode_quant = partial(F.one_hot, num_classes=self.opt.num_quantizers) self.encode_action = partial(F.one_hot, num_classes=self.num_actions) self.quant_emb = nn.Linear(self.opt.num_quantizers, self.latent_dim) # if self.cond_mode != 'no_cond': if self.cond_mode == 'text': self.cond_emb = nn.Linear(self.clip_dim, self.latent_dim) elif self.cond_mode == 'action': self.cond_emb = nn.Linear(self.num_actions, self.latent_dim) else: raise KeyError("Unsupported condition mode!!!") _num_tokens = opt.num_tokens + 1 # one dummy tokens for padding self.pad_id = opt.num_tokens # self.output_process = OutputProcess_Bert(out_feats=opt.num_tokens, latent_dim=latent_dim) self.output_process = OutputProcess(out_feats=code_dim, latent_dim=latent_dim) if shared_codebook: token_embed = nn.Parameter(torch.normal(mean=0, std=0.02, size=(_num_tokens, code_dim))) self.token_embed_weight = token_embed.expand(opt.num_quantizers-1, _num_tokens, code_dim) if share_weight: self.output_proj_weight = self.token_embed_weight self.output_proj_bias = None else: output_proj = nn.Parameter(torch.normal(mean=0, std=0.02, size=(_num_tokens, code_dim))) output_bias = nn.Parameter(torch.zeros(size=(_num_tokens,))) # self.output_proj_bias = 0 self.output_proj_weight = output_proj.expand(opt.num_quantizers-1, _num_tokens, code_dim) self.output_proj_bias = output_bias.expand(opt.num_quantizers-1, _num_tokens) else: if share_weight: self.embed_proj_shared_weight = nn.Parameter(torch.normal(mean=0, std=0.02, size=(opt.num_quantizers - 2, _num_tokens, code_dim))) self.token_embed_weight_ = nn.Parameter(torch.normal(mean=0, std=0.02, size=(1, _num_tokens, code_dim))) self.output_proj_weight_ = nn.Parameter(torch.normal(mean=0, std=0.02, size=(1, _num_tokens, code_dim))) self.output_proj_bias = None self.registered = False else: output_proj_weight = torch.normal(mean=0, std=0.02, size=(opt.num_quantizers - 1, _num_tokens, code_dim)) self.output_proj_weight = nn.Parameter(output_proj_weight) self.output_proj_bias = nn.Parameter(torch.zeros(size=(opt.num_quantizers, _num_tokens))) token_embed_weight = torch.normal(mean=0, std=0.02, size=(opt.num_quantizers - 1, _num_tokens, code_dim)) self.token_embed_weight = nn.Parameter(token_embed_weight) self.apply(self.__init_weights) self.shared_codebook = shared_codebook self.share_weight = share_weight if self.cond_mode == 'text': print('Loading CLIP...') self.clip_version = clip_version self.clip_model = self.load_and_freeze_clip(clip_version)
Preparing Networks
__init__
python
EricGuo5513/momask-codes
models/mask_transformer/transformer.py
https://github.com/EricGuo5513/momask-codes/blob/master/models/mask_transformer/transformer.py
MIT
def output_project(self, logits, qids): ''' :logits: (bs, code_dim, seqlen) :qids: (bs) :return: -logits (bs, ntoken, seqlen) ''' # (num_qlayers-1, num_token, code_dim) -> (bs, ntoken, code_dim) output_proj_weight = self.output_proj_weight[qids] # (num_qlayers, ntoken) -> (bs, ntoken) output_proj_bias = None if self.output_proj_bias is None else self.output_proj_bias[qids] output = torch.einsum('bnc, bcs->bns', output_proj_weight, logits) if output_proj_bias is not None: output = output + output_proj_bias.unsqueeze(-1) return output
:logits: (bs, code_dim, seqlen) :qids: (bs) :return: -logits (bs, ntoken, seqlen)
output_project
python
EricGuo5513/momask-codes
models/mask_transformer/transformer.py
https://github.com/EricGuo5513/momask-codes/blob/master/models/mask_transformer/transformer.py
MIT
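A quick shape check for the einsum in `output_project`: per-sample projection weights of shape `(ntoken, code_dim)` are applied to logits of shape `(code_dim, seqlen)`, giving `(ntoken, seqlen)` scores per sample. The sizes below are made up purely for illustration.

```python3
# Standalone shape check of the batched projection used in output_project.
import torch

bs, ntoken, code_dim, seqlen = 2, 513, 512, 49           # illustrative sizes only
output_proj_weight = torch.randn(bs, ntoken, code_dim)    # already gathered per-sample by qids
logits = torch.randn(bs, code_dim, seqlen)
out = torch.einsum('bnc, bcs->bns', output_proj_weight, logits)
assert out.shape == (bs, ntoken, seqlen)
```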