version (stringclasses, 25 values) | code (stringlengths, 75-178k) | apis (sequence) | full_version (stringlengths, 1-6) | repo_name (stringlengths, 9-78) | hexsha (stringlengths, 40) |
---|---|---|---|---|---|
1.6 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import torch
from UMRFormer_Net.inference.predict import predict_from_folder
from UMRFormer_Net.paths import default_plans_identifier, network_training_output_dir, default_cascade_trainer, default_trainer
from batchgenerators.utilities.file_and_folder_operations import join, isdir
from UMRFormer_Net.utilities.task_name_id_conversion import convert_id_to_task_name
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-i", '--input_folder', help="Must contain all modalities for each patient in the correct"
" order (same as training). Files must be named "
"CASENAME_XXXX.nii.gz where XXXX is the modality "
"identifier (0000, 0001, etc)", required=True)
parser.add_argument('-o', "--output_folder", required=True, help="folder for saving predictions")
parser.add_argument('-t', '--task_name', help='task name or task ID, required.',
default=default_plans_identifier, required=True)
parser.add_argument('-tr', '--trainer_class_name',
help='Name of the UMRFormerTrainer used for 2D U-Net, full resolution 3D U-Net and low resolution '
'U-Net. The default is %s. If you are running inference with the cascade and the folder '
'pointed to by --lowres_segmentations does not contain the segmentation maps generated by '
'the low resolution U-Net then the low resolution segmentation maps will be automatically '
'generated. For this case, make sure to set the trainer class here that matches your '
'--cascade_trainer_class_name (this part can be ignored if defaults are used).'
% default_trainer,
required=False,
default=default_trainer)
parser.add_argument('-ctr', '--cascade_trainer_class_name',
help="Trainer class name used for predicting the 3D full resolution U-Net part of the cascade."
"Default is %s" % default_cascade_trainer, required=False,
default=default_cascade_trainer)
parser.add_argument('-m', '--model', help="2d, 3d_lowres, 3d_fullres or 3d_cascade_fullres. Default: 3d_fullres",
default="3d_fullres", required=False)
parser.add_argument('-p', '--plans_identifier', help='do not touch this unless you know what you are doing',
default=default_plans_identifier, required=False)
parser.add_argument('-f', '--folds', nargs='+', default='None',
help="folds to use for prediction. Default is None which means that folds will be detected "
"automatically in the model output folder")
parser.add_argument('-z', '--save_npz', required=False, action='store_true',
help="use this if you want to ensemble these predictions with those of other models. Softmax "
"probabilities will be saved as compressed numpy arrays in output_folder and can be "
"merged between output_folders with UMRFormer_ensemble_predictions")
parser.add_argument('-l', '--lowres_segmentations', required=False, default='None',
help="if model is the highres stage of the cascade then you can use this folder to provide "
"predictions from the low resolution 3D U-Net. If this is left at default, the "
"predictions will be generated automatically (provided that the 3D low resolution U-Net "
"network weights are present")
parser.add_argument("--part_id", type=int, required=False, default=0, help="Used to parallelize the prediction of "
"the folder over several GPUs. If you "
"want to use n GPUs to predict this "
"folder you need to run this command "
"n times with --part_id=0, ... n-1 and "
"--num_parts=n (each with a different "
"GPU (for example via "
"CUDA_VISIBLE_DEVICES=X)")
parser.add_argument("--num_parts", type=int, required=False, default=1,
help="Used to parallelize the prediction of "
"the folder over several GPUs. If you "
"want to use n GPUs to predict this "
"folder you need to run this command "
"n times with --part_id=0, ... n-1 and "
"--num_parts=n (each with a different "
"GPU (via "
"CUDA_VISIBLE_DEVICES=X)")
parser.add_argument("--num_threads_preprocessing", required=False, default=6, type=int, help=
"Determines many background processes will be used for data preprocessing. Reduce this if you "
"run into out of memory (RAM) problems. Default: 6")
parser.add_argument("--num_threads_nifti_save", required=False, default=2, type=int, help=
"Determines many background processes will be used for segmentation export. Reduce this if you "
"run into out of memory (RAM) problems. Default: 2")
parser.add_argument("--disable_tta", required=False, default=False, action="store_true",
help="set this flag to disable test time data augmentation via mirroring. Speeds up inference "
"by roughly factor 4 (2D) or 8 (3D)")
parser.add_argument("--overwrite_existing", required=False, default=False, action="store_true",
help="Set this flag if the target folder contains predictions that you would like to overwrite")
parser.add_argument("--mode", type=str, default="normal", required=False, help="Hands off!")
parser.add_argument("--all_in_gpu", type=str, default="None", required=False, help="can be None, False or True. "
"Do not touch.")
parser.add_argument("--step_size", type=float, default=0.5, required=False, help="don't touch")
# parser.add_argument("--interp_order", required=False, default=3, type=int,
# help="order of interpolation for segmentations, has no effect if mode=fastest. Do not touch this.")
# parser.add_argument("--interp_order_z", required=False, default=0, type=int,
# help="order of interpolation along z is z is done differently. Do not touch this.")
# parser.add_argument("--force_separate_z", required=False, default="None", type=str,
# help="force_separate_z resampling. Can be None, True or False, has no effect if mode=fastest. "
# "Do not touch this.")
parser.add_argument('-chk',
help='checkpoint name, default: model_final_checkpoint',
required=False,
default='model_final_checkpoint')
parser.add_argument('--disable_mixed_precision', default=False, action='store_true', required=False,
help='Predictions are done with mixed precision by default. This improves speed and reduces '
'the required vram. If you want to disable mixed precision you can set this flag. Note '
'that this is not recommended (mixed precision is ~2x faster!)')
args = parser.parse_args()
input_folder = args.input_folder
output_folder = args.output_folder
part_id = args.part_id
num_parts = args.num_parts
folds = args.folds
save_npz = args.save_npz
lowres_segmentations = args.lowres_segmentations
num_threads_preprocessing = args.num_threads_preprocessing
num_threads_nifti_save = args.num_threads_nifti_save
disable_tta = args.disable_tta
step_size = args.step_size
# interp_order = args.interp_order
# interp_order_z = args.interp_order_z
# force_separate_z = args.force_separate_z
overwrite_existing = args.overwrite_existing
mode = args.mode
all_in_gpu = args.all_in_gpu
model = args.model
trainer_class_name = args.trainer_class_name
cascade_trainer_class_name = args.cascade_trainer_class_name
task_name = args.task_name
if not task_name.startswith("Task"):
task_id = int(task_name)
task_name = convert_id_to_task_name(task_id)
assert model in ["2d", "3d_lowres", "3d_fullres", "3d_cascade_fullres"], "-m must be 2d, 3d_lowres, 3d_fullres or " \
"3d_cascade_fullres"
# if force_separate_z == "None":
# force_separate_z = None
# elif force_separate_z == "False":
# force_separate_z = False
# elif force_separate_z == "True":
# force_separate_z = True
# else:
# raise ValueError("force_separate_z must be None, True or False. Given: %s" % force_separate_z)
if lowres_segmentations == "None":
lowres_segmentations = None
if isinstance(folds, list):
if folds[0] == 'all' and len(folds) == 1:
pass
else:
folds = [int(i) for i in folds]
elif folds == "None":
folds = None
else:
raise ValueError("Unexpected value for argument folds")
assert all_in_gpu in ['None', 'False', 'True']
if all_in_gpu == "None":
all_in_gpu = None
elif all_in_gpu == "True":
all_in_gpu = True
elif all_in_gpu == "False":
all_in_gpu = False
# we need to catch the case where model is 3d cascade fullres and the low resolution folder has not been set.
# In that case we need to try and predict with 3d low res first
if model == "3d_cascade_fullres" and lowres_segmentations is None:
print("lowres_segmentations is None. Attempting to predict 3d_lowres first...")
assert part_id == 0 and num_parts == 1, "if you don't specify a --lowres_segmentations folder for the " \
"inference of the cascade, custom values for part_id and num_parts " \
"are not supported. If you wish to have multiple parts, please " \
"run the 3d_lowres inference first (separately)"
model_folder_name = join(network_training_output_dir, "3d_lowres", task_name, trainer_class_name + "__" +
args.plans_identifier)
assert isdir(model_folder_name), "model output folder not found. Expected: %s" % model_folder_name
lowres_output_folder = join(output_folder, "3d_lowres_predictions")
predict_from_folder(model_folder_name, input_folder, lowres_output_folder, folds, False,
num_threads_preprocessing, num_threads_nifti_save, None, part_id, num_parts, not disable_tta,
overwrite_existing=overwrite_existing, mode=mode, overwrite_all_in_gpu=all_in_gpu,
mixed_precision=not args.disable_mixed_precision,
step_size=step_size)
lowres_segmentations = lowres_output_folder
torch.cuda.empty_cache()
print("3d_lowres done")
if model == "3d_cascade_fullres":
trainer = cascade_trainer_class_name
else:
trainer = trainer_class_name
model_folder_name = join(network_training_output_dir, model, task_name, trainer + "__" +
args.plans_identifier)
print("using model stored in ", model_folder_name)
assert isdir(model_folder_name), "model output folder not found. Expected: %s" % model_folder_name
predict_from_folder(model_folder_name, input_folder, output_folder, folds, save_npz, num_threads_preprocessing,
num_threads_nifti_save, lowres_segmentations, part_id, num_parts, not disable_tta,
overwrite_existing=overwrite_existing, mode=mode, overwrite_all_in_gpu=all_in_gpu,
mixed_precision=not args.disable_mixed_precision,
step_size=step_size, checkpoint_name=args.chk)
if __name__ == "__main__":
main()
| [
"torch.cuda.empty_cache"
] | 1.6.0 | supersunshinefk/UMRFormer-Net | bf165ca2158a158f7c194c6201af2a4fcabe8742 |
1.7 | # encoding: utf-8
import os
import random
import torch
import torch.nn as nn
import torch.distributed as dist
from yolox.exp import Exp as MyExp
from yolox.data import get_yolox_datadir
class Exp(MyExp):
def __init__(self):
super(Exp, self).__init__()
self.num_classes = 1
self.depth = 0.67
self.width = 0.75
self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
# self.train_ann = "train.json"
# self.val_ann = "train.json"
self.train_ann = "rideflux_coco_train.json"
self.val_ann = "rideflux_coco_val.json"
self.input_size = (800, 1440)
self.test_size = (800, 1440)
self.random_size = (18, 32)
self.max_epoch = 80
self.print_interval = 20
self.eval_interval = 5
self.test_conf = 0.001
self.nmsthre = 0.7
self.no_aug_epochs = 10
self.basic_lr_per_img = 0.001 / 64.0
self.warmup_epochs = 1
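# Note (added for clarity, assuming the standard upstream YOLOX behaviour where the
# optimizer lr is basic_lr_per_img * total batch size): a total batch size of 48
# would give 0.001 / 64.0 * 48 = 7.5e-4.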
def get_data_loader(self, batch_size, is_distributed, no_aug=False):
from yolox.data import (
MOTDataset,
TrainTransform,
YoloBatchSampler,
DataLoader,
InfiniteSampler,
MosaicDetection,
)
# dataset = MOTDataset(
# data_dir=os.path.join(get_yolox_datadir(), "mix_det"),
# json_file=self.train_ann,
# name='',
# img_size=self.input_size,
# preproc=TrainTransform(
# rgb_means=(0.485, 0.456, 0.406),
# std=(0.229, 0.224, 0.225),
# max_labels=500,
# ),
# )
dataset = MOTDataset(
data_dir=os.path.join(get_yolox_datadir(), "rideflux"),
json_file=self.train_ann,
name='train',
img_size=self.input_size,
preproc=TrainTransform(
rgb_means=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
max_labels=500,
),
)
dataset = MosaicDetection(
dataset,
mosaic=not no_aug,
img_size=self.input_size,
preproc=TrainTransform(
rgb_means=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
max_labels=1000,
),
degrees=self.degrees,
translate=self.translate,
scale=self.scale,
shear=self.shear,
perspective=self.perspective,
enable_mixup=self.enable_mixup,
)
self.dataset = dataset
if is_distributed:
batch_size = batch_size // dist.get_world_size()
sampler = InfiniteSampler(
len(self.dataset), seed=self.seed if self.seed else 0
)
batch_sampler = YoloBatchSampler(
sampler=sampler,
batch_size=batch_size,
drop_last=False,
input_dimension=self.input_size,
mosaic=not no_aug,
)
dataloader_kwargs = {"num_workers": self.data_num_workers, "pin_memory": True}
dataloader_kwargs["batch_sampler"] = batch_sampler
train_loader = DataLoader(self.dataset, **dataloader_kwargs)
return train_loader
def get_eval_loader(self, batch_size, is_distributed, testdev=False):
from yolox.data import MOTDataset, ValTransform
# valdataset = MOTDataset(
# data_dir=os.path.join(get_yolox_datadir(), "mot"),
# json_file=self.val_ann,
# img_size=self.test_size,
# name='train',
# preproc=ValTransform(
# rgb_means=(0.485, 0.456, 0.406),
# std=(0.229, 0.224, 0.225),
# ),
# )
valdataset = MOTDataset(
data_dir=os.path.join(get_yolox_datadir(), "rideflux"),
json_file=self.val_ann,
img_size=self.test_size,
name='val',
preproc=ValTransform(
rgb_means=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
),
)
if is_distributed:
batch_size = batch_size // dist.get_world_size()
sampler = torch.utils.data.distributed.DistributedSampler(
valdataset, shuffle=False
)
else:
sampler = torch.utils.data.SequentialSampler(valdataset)
dataloader_kwargs = {
"num_workers": self.data_num_workers,
"pin_memory": True,
"sampler": sampler,
}
dataloader_kwargs["batch_size"] = batch_size
val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs)
return val_loader
def get_evaluator(self, batch_size, is_distributed, testdev=False):
from yolox.evaluators import COCOEvaluator
val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev)
evaluator = COCOEvaluator(
dataloader=val_loader,
img_size=self.test_size,
confthre=self.test_conf,
nmsthre=self.nmsthre,
num_classes=self.num_classes,
testdev=testdev,
)
return evaluator
| [
"torch.distributed.get_world_size",
"torch.utils.data.SequentialSampler",
"torch.utils.data.DataLoader",
"torch.utils.data.distributed.DistributedSampler"
] | 1.7 | monchana/ByteTrack | b9a9448fa01d661a07d74aed6f900a4f5e7c1f54 |
1.0 | # coding=utf-8
# Copyright 2021 Iz Beltagy, Matthew E. Peters, Arman Cohan and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch LED model. """
import math
import random
from dataclasses import dataclass
from typing import List, Optional, Tuple
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
from ...activations import ACT2FN
from ...file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
Seq2SeqQuestionAnsweringModelOutput,
Seq2SeqSequenceClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_led import LEDConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "allenai/led-base-16384"
_CONFIG_FOR_DOC = "LEDConfig"
_TOKENIZER_FOR_DOC = "LEDTokenizer"
LED_PRETRAINED_MODEL_ARCHIVE_LIST = [
"allenai/led-base-16384",
# See all LED models at https://huggingface.co/models?filter=led
]
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
"""
Shift input ids one token to the right.
"""
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
shifted_input_ids[:, 0] = decoder_start_token_id
assert pad_token_id is not None, "config.pad_token_id has to be defined."
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
return shifted_input_ids
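# Worked example (illustrative, not part of the original source): with
# input_ids = [[13, 14, 15]], pad_token_id = 1 and decoder_start_token_id = 2,
# the function returns [[2, 13, 14]]; any -100 label positions in the shifted
# copy are replaced by pad_token_id.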
def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0):
"""
Make causal mask used for uni-directional (decoder) self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), float("-inf"))
mask_cond = torch.arange(mask.size(-1))
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
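# Worked example (illustrative): for input_ids_shape = (1, 3) and
# past_key_values_length = 0 the returned mask has shape (1, 1, 3, 3) with rows
# [0, -inf, -inf], [0, 0, -inf], [0, 0, 0], so each position can only attend to
# itself and earlier positions.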
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
expanded_attention_mask = inverted_mask.masked_fill(inverted_mask.bool(), torch.finfo(dtype).min)
# make sure that global_attn_mask is positive
expanded_attention_mask = expanded_attention_mask * inverted_mask
return expanded_attention_mask
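# Worked example (illustrative): attention_mask = [[1, 1, 0]] becomes a
# (1, 1, 3, 3) tensor whose last column is torch.finfo(dtype).min and whose
# other entries are 0.0, i.e. the padded position is masked out additively.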
class LEDLearnedPositionalEmbedding(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
"""
def __init__(self, num_embeddings: int, embedding_dim: int):
super().__init__(num_embeddings, embedding_dim)
def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0):
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
bsz, seq_len = input_ids_shape[:2]
positions = torch.arange(
past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
)
return super().forward(positions)
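# Worked example (illustrative): input_ids_shape = (2, 5) with
# past_key_values_length = 3 yields position ids [3, 4, 5, 6, 7], which are then
# looked up in the learned embedding table.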
# Copied from transformers.models.longformer.modeling_longformer.LongformerSelfAttention with Longformer->LEDEncoder
class LEDEncoderSelfAttention(nn.Module):
def __init__(self, config, layer_id):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_heads = config.num_attention_heads
self.head_dim = int(config.hidden_size / config.num_attention_heads)
self.embed_dim = config.hidden_size
self.query = nn.Linear(config.hidden_size, self.embed_dim)
self.key = nn.Linear(config.hidden_size, self.embed_dim)
self.value = nn.Linear(config.hidden_size, self.embed_dim)
# separate projection layers for tokens with global attention
self.query_global = nn.Linear(config.hidden_size, self.embed_dim)
self.key_global = nn.Linear(config.hidden_size, self.embed_dim)
self.value_global = nn.Linear(config.hidden_size, self.embed_dim)
self.dropout = config.attention_probs_dropout_prob
self.layer_id = layer_id
attention_window = config.attention_window[self.layer_id]
assert (
attention_window % 2 == 0
), f"`attention_window` for layer {self.layer_id} has to be an even value. Given {attention_window}"
assert (
attention_window > 0
), f"`attention_window` for layer {self.layer_id} has to be positive. Given {attention_window}"
self.one_sided_attn_window_size = attention_window // 2
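# Note (added for clarity): attention_window is the total window size, so e.g.
# attention_window = 512 gives one_sided_attn_window_size = 256, i.e. each token
# attends to 256 tokens on each side in the local attention pattern.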
def forward(
self,
hidden_states,
attention_mask=None,
layer_head_mask=None,
is_index_masked=None,
is_index_global_attn=None,
is_global_attn=None,
output_attentions=False,
):
"""
:class:`LEDEncoderSelfAttention` expects `len(hidden_states)` to be multiple of `attention_window`. Padding to
`attention_window` happens in :meth:`LEDEncoderModel.forward` to avoid redoing the padding on each layer.
The `attention_mask` is changed in :meth:`LEDEncoderModel.forward` from 0, 1, 2 to:
* -10000: no attention
* 0: local attention
* +10000: global attention
"""
hidden_states = hidden_states.transpose(0, 1)
# project hidden states
query_vectors = self.query(hidden_states)
key_vectors = self.key(hidden_states)
value_vectors = self.value(hidden_states)
seq_len, batch_size, embed_dim = hidden_states.size()
assert (
embed_dim == self.embed_dim
), f"hidden_states should have embed_dim = {self.embed_dim}, but has {embed_dim}"
# normalize query
query_vectors /= math.sqrt(self.head_dim)
query_vectors = query_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1)
key_vectors = key_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1)
attn_scores = self._sliding_chunks_query_key_matmul(
query_vectors, key_vectors, self.one_sided_attn_window_size
)
# values to pad for attention probs
remove_from_windowed_attention_mask = (attention_mask != 0)[:, :, None, None]
# cast to fp32/fp16 then replace 1's with -inf
float_mask = remove_from_windowed_attention_mask.type_as(query_vectors).masked_fill(
remove_from_windowed_attention_mask, -10000.0
)
# diagonal mask with zeros everywhere and -inf inplace of padding
diagonal_mask = self._sliding_chunks_query_key_matmul(
float_mask.new_ones(size=float_mask.size()), float_mask, self.one_sided_attn_window_size
)
# pad local attention probs
attn_scores += diagonal_mask
assert list(attn_scores.size()) == [
batch_size,
seq_len,
self.num_heads,
self.one_sided_attn_window_size * 2 + 1,
], f"local_attn_probs should be of size ({batch_size}, {seq_len}, {self.num_heads}, {self.one_sided_attn_window_size * 2 + 1}), but is of size {attn_scores.size()}"
# compute local attention probs from global attention keys and concat over window dim
if is_global_attn:
# compute global attn indices required throughout the forward fn
(
max_num_global_attn_indices,
is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero,
) = self._get_global_attn_indices(is_index_global_attn)
# calculate global attn probs from global key
global_key_attn_scores = self._concat_with_global_key_attn_probs(
query_vectors=query_vectors,
key_vectors=key_vectors,
max_num_global_attn_indices=max_num_global_attn_indices,
is_index_global_attn_nonzero=is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero,
)
# concat to local_attn_probs
# (batch_size, seq_len, num_heads, extra attention count + 2*window+1)
attn_scores = torch.cat((global_key_attn_scores, attn_scores), dim=-1)
# free memory
del global_key_attn_scores
attn_probs = F.softmax(attn_scores, dim=-1, dtype=torch.float32) # use fp32 for numerical stability
if layer_head_mask is not None:
assert layer_head_mask.size() == (
self.num_heads,
), f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}"
attn_probs = layer_head_mask.view(1, 1, -1, 1) * attn_probs
# softmax sometimes inserts NaN if all positions are masked, replace them with 0
attn_probs = torch.masked_fill(attn_probs, is_index_masked[:, :, None, None], 0.0)
attn_probs = attn_probs.type_as(attn_scores)
# free memory
del attn_scores
# apply dropout
attn_probs = F.dropout(attn_probs, p=self.dropout, training=self.training)
value_vectors = value_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1)
# compute local attention output with global attention value and add
if is_global_attn:
# compute sum of global and local attn
attn_output = self._compute_attn_output_with_global_indices(
value_vectors=value_vectors,
attn_probs=attn_probs,
max_num_global_attn_indices=max_num_global_attn_indices,
is_index_global_attn_nonzero=is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
)
else:
# compute local attn only
attn_output = self._sliding_chunks_matmul_attn_probs_value(
attn_probs, value_vectors, self.one_sided_attn_window_size
)
assert attn_output.size() == (batch_size, seq_len, self.num_heads, self.head_dim), "Unexpected size"
attn_output = attn_output.transpose(0, 1).reshape(seq_len, batch_size, embed_dim).contiguous()
# compute value for global attention and overwrite to attention output
# TODO: remove the redundant computation
if is_global_attn:
global_attn_output, global_attn_probs = self._compute_global_attn_output_from_hidden(
hidden_states=hidden_states,
max_num_global_attn_indices=max_num_global_attn_indices,
layer_head_mask=layer_head_mask,
is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
is_index_global_attn_nonzero=is_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero,
is_index_masked=is_index_masked,
)
# get only non zero global attn output
nonzero_global_attn_output = global_attn_output[
is_local_index_global_attn_nonzero[0], :, is_local_index_global_attn_nonzero[1]
]
# overwrite values with global attention
attn_output[is_index_global_attn_nonzero[::-1]] = nonzero_global_attn_output.view(
len(is_local_index_global_attn_nonzero[0]), -1
)
# The attention weights for tokens with global attention are
# just filler values, they were never used to compute the output.
# Fill with 0 now, the correct values are in 'global_attn_probs'.
attn_probs[is_index_global_attn_nonzero] = 0
outputs = (attn_output.transpose(0, 1),)
if output_attentions:
outputs += (attn_probs,)
return outputs + (global_attn_probs,) if (is_global_attn and output_attentions) else outputs
@staticmethod
def _pad_and_transpose_last_two_dims(hidden_states_padded, padding):
"""pads rows and then flips rows and columns"""
hidden_states_padded = F.pad(
hidden_states_padded, padding
) # padding value is not important because it will be overwritten
hidden_states_padded = hidden_states_padded.view(
*hidden_states_padded.size()[:-2], hidden_states_padded.size(-1), hidden_states_padded.size(-2)
)
return hidden_states_padded
@staticmethod
def _pad_and_diagonalize(chunked_hidden_states):
"""
shift every row 1 step right, converting columns into diagonals.
Example::
chunked_hidden_states: [ 0.4983, 2.6918, -0.0071, 1.0492,
-1.8348, 0.7672, 0.2986, 0.0285,
-0.7584, 0.4206, -0.0405, 0.1599,
2.0514, -1.1600, 0.5372, 0.2629 ]
window_overlap = num_rows = 4
(pad & diagonalize) =>
[ 0.4983, 2.6918, -0.0071, 1.0492, 0.0000, 0.0000, 0.0000
0.0000, -1.8348, 0.7672, 0.2986, 0.0285, 0.0000, 0.0000
0.0000, 0.0000, -0.7584, 0.4206, -0.0405, 0.1599, 0.0000
0.0000, 0.0000, 0.0000, 2.0514, -1.1600, 0.5372, 0.2629 ]
"""
total_num_heads, num_chunks, window_overlap, hidden_dim = chunked_hidden_states.size()
chunked_hidden_states = F.pad(
chunked_hidden_states, (0, window_overlap + 1)
) # total_num_heads x num_chunks x window_overlap x (hidden_dim+window_overlap+1). Padding value is not important because it'll be overwritten
chunked_hidden_states = chunked_hidden_states.view(
total_num_heads, num_chunks, -1
) # total_num_heads x num_chunks x window_overlap*window_overlap+window_overlap
chunked_hidden_states = chunked_hidden_states[
:, :, :-window_overlap
] # total_num_heads x num_chunks x window_overlap*window_overlap
chunked_hidden_states = chunked_hidden_states.view(
total_num_heads, num_chunks, window_overlap, window_overlap + hidden_dim
)
chunked_hidden_states = chunked_hidden_states[:, :, :, :-1]
return chunked_hidden_states
@staticmethod
def _chunk(hidden_states, window_overlap):
"""convert into overlapping chunks. Chunk size = 2w, overlap size = w"""
# non-overlapping chunks of size = 2w
hidden_states = hidden_states.view(
hidden_states.size(0),
hidden_states.size(1) // (window_overlap * 2),
window_overlap * 2,
hidden_states.size(2),
)
# use `as_strided` to make the chunks overlap with an overlap size = window_overlap
chunk_size = list(hidden_states.size())
chunk_size[1] = chunk_size[1] * 2 - 1
chunk_stride = list(hidden_states.stride())
chunk_stride[1] = chunk_stride[1] // 2
return hidden_states.as_strided(size=chunk_size, stride=chunk_stride)
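# Worked example (illustrative): for seq_len = 8 and window_overlap = 2 the
# strided view yields 3 chunks of length 4 covering indices [0:4], [2:6] and
# [4:8], i.e. consecutive chunks overlap by window_overlap tokens.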
@staticmethod
def _mask_invalid_locations(input_tensor, affected_seq_len) -> torch.Tensor:
beginning_mask_2d = input_tensor.new_ones(affected_seq_len, affected_seq_len + 1).tril().flip(dims=[0])
beginning_mask = beginning_mask_2d[None, :, None, :]
ending_mask = beginning_mask.flip(dims=(1, 3))
beginning_input = input_tensor[:, :affected_seq_len, :, : affected_seq_len + 1]
beginning_mask = beginning_mask.expand(beginning_input.size())
beginning_input.masked_fill_(beginning_mask == 1, -float("inf")) # `== 1` converts to bool or uint8
ending_input = input_tensor[:, -affected_seq_len:, :, -(affected_seq_len + 1) :]
ending_mask = ending_mask.expand(ending_input.size())
ending_input.masked_fill_(ending_mask == 1, -float("inf")) # `== 1` converts to bool or uint8
def _sliding_chunks_query_key_matmul(self, query: torch.Tensor, key: torch.Tensor, window_overlap: int):
"""
Matrix multiplication of query and key tensors using a sliding window attention pattern. This
implementation splits the input into overlapping chunks of size 2w (e.g. 512 for pretrained LEDEncoder) with an
overlap of size window_overlap
"""
batch_size, seq_len, num_heads, head_dim = query.size()
assert (
seq_len % (window_overlap * 2) == 0
), f"Sequence length should be multiple of {window_overlap * 2}. Given {seq_len}"
assert query.size() == key.size()
chunks_count = seq_len // window_overlap - 1
# group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size window_overlap * 2
query = query.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim)
key = key.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim)
query = self._chunk(query, window_overlap)
key = self._chunk(key, window_overlap)
# matrix multiplication
# bcxd: batch_size * num_heads x chunks x 2window_overlap x head_dim
# bcyd: batch_size * num_heads x chunks x 2window_overlap x head_dim
# bcxy: batch_size * num_heads x chunks x 2window_overlap x 2window_overlap
diagonal_chunked_attention_scores = torch.einsum("bcxd,bcyd->bcxy", (query, key)) # multiply
# convert diagonals into columns
diagonal_chunked_attention_scores = self._pad_and_transpose_last_two_dims(
diagonal_chunked_attention_scores, padding=(0, 0, 0, 1)
)
# allocate space for the overall attention matrix where the chunks are combined. The last dimension
# has (window_overlap * 2 + 1) columns. The first (window_overlap) columns are the window_overlap lower triangles (attention from a word to
# window_overlap previous words). The following column is attention score from each word to itself, then
# followed by window_overlap columns for the upper triangle.
diagonal_attention_scores = diagonal_chunked_attention_scores.new_empty(
(batch_size * num_heads, chunks_count + 1, window_overlap, window_overlap * 2 + 1)
)
# copy parts from diagonal_chunked_attention_scores into the combined matrix of attentions
# - copying the main diagonal and the upper triangle
diagonal_attention_scores[:, :-1, :, window_overlap:] = diagonal_chunked_attention_scores[
:, :, :window_overlap, : window_overlap + 1
]
diagonal_attention_scores[:, -1, :, window_overlap:] = diagonal_chunked_attention_scores[
:, -1, window_overlap:, : window_overlap + 1
]
# - copying the lower triangle
diagonal_attention_scores[:, 1:, :, :window_overlap] = diagonal_chunked_attention_scores[
:, :, -(window_overlap + 1) : -1, window_overlap + 1 :
]
diagonal_attention_scores[:, 0, 1:window_overlap, 1:window_overlap] = diagonal_chunked_attention_scores[
:, 0, : window_overlap - 1, 1 - window_overlap :
]
# separate batch_size and num_heads dimensions again
diagonal_attention_scores = diagonal_attention_scores.view(
batch_size, num_heads, seq_len, 2 * window_overlap + 1
).transpose(2, 1)
self._mask_invalid_locations(diagonal_attention_scores, window_overlap)
return diagonal_attention_scores
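# Note (added for clarity): the result has shape
# (batch_size, seq_len, num_heads, 2 * window_overlap + 1), i.e. one score per
# query position for each key inside its local attention window.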
def _sliding_chunks_matmul_attn_probs_value(
self, attn_probs: torch.Tensor, value: torch.Tensor, window_overlap: int
):
"""
Same as _sliding_chunks_query_key_matmul but for attn_probs and value tensors. Returned tensor will be of the
same shape as `attn_probs`
"""
batch_size, seq_len, num_heads, head_dim = value.size()
assert seq_len % (window_overlap * 2) == 0
assert attn_probs.size()[:3] == value.size()[:3]
assert attn_probs.size(3) == 2 * window_overlap + 1
chunks_count = seq_len // window_overlap - 1
# group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size 2 window overlap
chunked_attn_probs = attn_probs.transpose(1, 2).reshape(
batch_size * num_heads, seq_len // window_overlap, window_overlap, 2 * window_overlap + 1
)
# group batch_size and num_heads dimensions into one
value = value.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim)
# pad seq_len with w at the beginning of the sequence and another window overlap at the end
padded_value = F.pad(value, (0, 0, window_overlap, window_overlap), value=-1)
# chunk padded_value into chunks of size 3 window overlap and an overlap of size window overlap
chunked_value_size = (batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim)
chunked_value_stride = padded_value.stride()
chunked_value_stride = (
chunked_value_stride[0],
window_overlap * chunked_value_stride[1],
chunked_value_stride[1],
chunked_value_stride[2],
)
chunked_value = padded_value.as_strided(size=chunked_value_size, stride=chunked_value_stride)
chunked_attn_probs = self._pad_and_diagonalize(chunked_attn_probs)
context = torch.einsum("bcwd,bcdh->bcwh", (chunked_attn_probs, chunked_value))
return context.view(batch_size, num_heads, seq_len, head_dim).transpose(1, 2)
@staticmethod
def _get_global_attn_indices(is_index_global_attn):
"""compute global attn indices required throughout forward pass"""
# helper variable
num_global_attn_indices = is_index_global_attn.long().sum(dim=1)
# max number of global attn indices in batch
max_num_global_attn_indices = num_global_attn_indices.max()
# indices of global attn
is_index_global_attn_nonzero = is_index_global_attn.nonzero(as_tuple=True)
# helper variable
is_local_index_global_attn = torch.arange(
max_num_global_attn_indices, device=is_index_global_attn.device
) < num_global_attn_indices.unsqueeze(dim=-1)
# location of the non-padding values within global attention indices
is_local_index_global_attn_nonzero = is_local_index_global_attn.nonzero(as_tuple=True)
# location of the padding values within global attention indices
is_local_index_no_global_attn_nonzero = (is_local_index_global_attn == 0).nonzero(as_tuple=True)
return (
max_num_global_attn_indices,
is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero,
)
def _concat_with_global_key_attn_probs(
self,
key_vectors,
query_vectors,
max_num_global_attn_indices,
is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero,
):
batch_size = key_vectors.shape[0]
# create only global key vectors
key_vectors_only_global = key_vectors.new_zeros(
batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim
)
key_vectors_only_global[is_local_index_global_attn_nonzero] = key_vectors[is_index_global_attn_nonzero]
# (batch_size, seq_len, num_heads, max_num_global_attn_indices)
attn_probs_from_global_key = torch.einsum("blhd,bshd->blhs", (query_vectors, key_vectors_only_global))
attn_probs_from_global_key[
is_local_index_no_global_attn_nonzero[0], :, :, is_local_index_no_global_attn_nonzero[1]
] = -10000.0
return attn_probs_from_global_key
def _compute_attn_output_with_global_indices(
self,
value_vectors,
attn_probs,
max_num_global_attn_indices,
is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero,
):
batch_size = attn_probs.shape[0]
# cut local attn probs to global only
attn_probs_only_global = attn_probs.narrow(-1, 0, max_num_global_attn_indices)
# get value vectors for global only
value_vectors_only_global = value_vectors.new_zeros(
batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim
)
value_vectors_only_global[is_local_index_global_attn_nonzero] = value_vectors[is_index_global_attn_nonzero]
# use `matmul` because `einsum` crashes sometimes with fp16
# attn = torch.einsum('blhs,bshd->blhd', (selected_attn_probs, selected_v))
# compute attn output only global
attn_output_only_global = torch.matmul(
attn_probs_only_global.transpose(1, 2), value_vectors_only_global.transpose(1, 2)
).transpose(1, 2)
# reshape attn probs
attn_probs_without_global = attn_probs.narrow(
-1, max_num_global_attn_indices, attn_probs.size(-1) - max_num_global_attn_indices
).contiguous()
# compute attn output with global
attn_output_without_global = self._sliding_chunks_matmul_attn_probs_value(
attn_probs_without_global, value_vectors, self.one_sided_attn_window_size
)
return attn_output_only_global + attn_output_without_global
def _compute_global_attn_output_from_hidden(
self,
hidden_states,
max_num_global_attn_indices,
layer_head_mask,
is_local_index_global_attn_nonzero,
is_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero,
is_index_masked,
):
seq_len, batch_size = hidden_states.shape[:2]
# prepare global hidden states
global_attn_hidden_states = hidden_states.new_zeros(max_num_global_attn_indices, batch_size, self.embed_dim)
global_attn_hidden_states[is_local_index_global_attn_nonzero[::-1]] = hidden_states[
is_index_global_attn_nonzero[::-1]
]
# global key, query, value
global_query_vectors_only_global = self.query_global(global_attn_hidden_states)
global_key_vectors = self.key_global(hidden_states)
global_value_vectors = self.value_global(hidden_states)
# normalize
global_query_vectors_only_global /= math.sqrt(self.head_dim)
# reshape
global_query_vectors_only_global = (
global_query_vectors_only_global.contiguous()
.view(max_num_global_attn_indices, batch_size * self.num_heads, self.head_dim)
.transpose(0, 1)
) # (batch_size * self.num_heads, max_num_global_attn_indices, head_dim)
global_key_vectors = (
global_key_vectors.contiguous().view(-1, batch_size * self.num_heads, self.head_dim).transpose(0, 1)
) # (batch_size * self.num_heads, seq_len, head_dim)
global_value_vectors = (
global_value_vectors.contiguous().view(-1, batch_size * self.num_heads, self.head_dim).transpose(0, 1)
) # (batch_size * self.num_heads, seq_len, head_dim)
# compute attn scores
global_attn_scores = torch.bmm(global_query_vectors_only_global, global_key_vectors.transpose(1, 2))
assert list(global_attn_scores.size()) == [
batch_size * self.num_heads,
max_num_global_attn_indices,
seq_len,
], f"global_attn_scores have the wrong size. Size should be {(batch_size * self.num_heads, max_num_global_attn_indices, seq_len)}, but is {global_attn_scores.size()}."
global_attn_scores = global_attn_scores.view(batch_size, self.num_heads, max_num_global_attn_indices, seq_len)
global_attn_scores[
is_local_index_no_global_attn_nonzero[0], :, is_local_index_no_global_attn_nonzero[1], :
] = -10000.0
global_attn_scores = global_attn_scores.masked_fill(
is_index_masked[:, None, None, :],
-10000.0,
)
global_attn_scores = global_attn_scores.view(batch_size * self.num_heads, max_num_global_attn_indices, seq_len)
# compute global attn probs
global_attn_probs_float = F.softmax(
global_attn_scores, dim=-1, dtype=torch.float32
) # use fp32 for numerical stability
# apply layer head masking
if layer_head_mask is not None:
assert layer_head_mask.size() == (
self.num_heads,
), f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}"
global_attn_probs_float = layer_head_mask.view(1, -1, 1, 1) * global_attn_probs_float.view(
batch_size, self.num_heads, max_num_global_attn_indices, seq_len
)
global_attn_probs_float = global_attn_probs_float.view(
batch_size * self.num_heads, max_num_global_attn_indices, seq_len
)
global_attn_probs = F.dropout(
global_attn_probs_float.type_as(global_attn_scores), p=self.dropout, training=self.training
)
# global attn output
global_attn_output = torch.bmm(global_attn_probs, global_value_vectors)
assert list(global_attn_output.size()) == [
batch_size * self.num_heads,
max_num_global_attn_indices,
self.head_dim,
], f"global_attn_output tensor has the wrong size. Size should be {(batch_size * self.num_heads, max_num_global_attn_indices, self.head_dim)}, but is {global_attn_output.size()}."
global_attn_probs = global_attn_probs.view(batch_size, self.num_heads, max_num_global_attn_indices, seq_len)
global_attn_output = global_attn_output.view(
batch_size, self.num_heads, max_num_global_attn_indices, self.head_dim
)
return global_attn_output, global_attn_probs
class LEDEncoderAttention(nn.Module):
def __init__(self, config, layer_id):
super().__init__()
self.longformer_self_attn = LEDEncoderSelfAttention(config, layer_id=layer_id)
self.output = nn.Linear(config.d_model, config.d_model)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
is_index_masked: Optional[torch.Tensor] = None,
is_index_global_attn: Optional[torch.Tensor] = None,
is_global_attn: Optional[bool] = None,
output_attentions: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
self_outputs = self.longformer_self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
is_index_masked=is_index_masked,
is_index_global_attn=is_index_global_attn,
is_global_attn=is_global_attn,
output_attentions=output_attentions,
)
attn_output = self.output(self_outputs[0])
outputs = (attn_output,) + self_outputs[1:]
return outputs
class LEDDecoderAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads})."
self.scaling = self.head_dim ** -0.5
self.is_decoder = is_decoder
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, embed_dim = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = torch.cat([past_key_value[0], key_states], dim=2)
value_states = torch.cat([past_key_value[1], value_states], dim=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states, value_states)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
assert attn_weights.size() == (
bsz * self.num_heads,
tgt_len,
src_len,
), f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}"
if attention_mask is not None:
assert attention_mask.size() == (
bsz,
1,
tgt_len,
src_len,
), f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = F.softmax(attn_weights, dim=-1)
if layer_head_mask is not None:
assert layer_head_mask.size() == (
self.num_heads,
), f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}"
attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to be reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_probs = F.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
assert attn_output.size() == (
bsz * self.num_heads,
tgt_len,
self.head_dim,
), f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}"
attn_output = (
attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
.transpose(1, 2)
.reshape(bsz, tgt_len, embed_dim)
)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights_reshaped, past_key_value
class LEDEncoderLayer(nn.Module):
def __init__(self, config: LEDConfig, layer_id: int):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = LEDEncoderAttention(config, layer_id)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
layer_head_mask: torch.Tensor,
is_index_masked=None,
is_index_global_attn=None,
is_global_attn=None,
output_attentions=False,
):
"""
Args:
hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
attention_mask (:obj:`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`.
"""
residual = hidden_states
attn_outputs = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
is_index_masked=is_index_masked,
is_index_global_attn=is_index_global_attn,
is_global_attn=is_global_attn,
output_attentions=output_attentions,
)
hidden_states = attn_outputs[0]
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = F.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
if hidden_states.dtype == torch.float16 and (
torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
):
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
return (hidden_states,) + attn_outputs[1:]
class LEDDecoderLayer(nn.Module):
def __init__(self, config: LEDConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = LEDDecoderAttention(
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.encoder_attn = LEDDecoderAttention(
self.embed_dim,
config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
)
self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = True,
):
"""
Args:
hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
attention_mask (:obj:`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (:obj:`torch.FloatTensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_attention_mask (:obj:`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size
`(decoder_attention_heads,)`.
cross_attn_layer_head_mask (:obj:`torch.FloatTensor`): mask for encoder attention heads in a given layer of
size `(decoder_attention_heads,)`.
past_key_value (:obj:`Tuple(torch.FloatTensor)`): cached past key and value projection states
output_attentions (:obj:`bool`): Whether the base model outputs attentions.
This requires the attentions tensor to be reshaped in this function.
"""
residual = hidden_states
# Self Attention
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
# add present self-attn cache to positions 1,2 of present_key_value tuple
hidden_states, self_attn_weights, present_key_value = self.self_attn(
hidden_states=hidden_states,
past_key_value=self_attn_past_key_value,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
output_attentions=output_attentions,
)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Cross-Attention Block
cross_attn_present_key_value = None
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
# cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
layer_head_mask=cross_attn_layer_head_mask,
past_key_value=cross_attn_past_key_value,
output_attentions=output_attentions,
)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
# add cross-attn to positions 3,4 of present_key_value tuple
present_key_value = present_key_value + cross_attn_present_key_value
# Fully Connected
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = F.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
if use_cache:
outputs += (present_key_value,)
return outputs
class LEDClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(
self,
input_dim: int,
inner_dim: int,
num_classes: int,
pooler_dropout: float,
):
super().__init__()
self.dense = nn.Linear(input_dim, inner_dim)
self.dropout = nn.Dropout(p=pooler_dropout)
self.out_proj = nn.Linear(inner_dim, num_classes)
def forward(self, hidden_states: torch.Tensor):
hidden_states = self.dropout(hidden_states)
hidden_states = self.dense(hidden_states)
hidden_states = torch.tanh(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.out_proj(hidden_states)
return hidden_states
class LEDPreTrainedModel(PreTrainedModel):
config_class = LEDConfig
base_model_prefix = "led"
def _init_weights(self, module):
std = self.config.init_std
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
@property
def dummy_inputs(self):
pad_token = self.config.pad_token_id
input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)
dummy_inputs = {
"attention_mask": input_ids.ne(pad_token),
"input_ids": input_ids,
}
return dummy_inputs
@dataclass
# Copied from transformers.models.longformer.modeling_longformer.LongformerBaseModelOutput with Longformer->LEDEncoder
class LEDEncoderBaseModelOutput(ModelOutput):
"""
Base class for LEDEncoder's outputs, with potential hidden states, local and global attentions.
Args:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, x + attention_window + 1)`, where ``x`` is the number of tokens with global attention
mask.
Local attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token in the sequence to every token with
global attention (first ``x`` values) and to every token in the attention window (remaining
``attention_window + 1`` values). Note that the first ``x`` values refer to tokens with fixed positions in
the text, but the remaining ``attention_window + 1`` values refer to tokens with relative positions: the
attention weight of a token to itself is located at index ``x + attention_window / 2`` and the
``attention_window / 2`` preceding (succeeding) values are the attention weights to the ``attention_window
/ 2`` preceding (succeeding) tokens. If the attention window contains a token with global attention, the
attention weight at the corresponding index is set to 0; the value should be accessed from the first ``x``
attention weights. If a token has global attention, the attention weights to all other tokens in
:obj:`attentions` is set to 0, the values should be accessed from :obj:`global_attentions`.
global_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, x)`, where ``x`` is the number of tokens with global attention mask.
Global attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
"""
last_hidden_state: torch.FloatTensor
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
global_attentions: Optional[Tuple[torch.FloatTensor]] = None
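# Indexing sketch for the local/global attention weights described above (an illustration of the docstring,
# assuming `x` tokens with global attention and an attention window of width `w`):
#
#     # attentions[layer] has shape (batch_size, num_heads, seq_len, x + w + 1)
#     weights = attentions[layer][b, h, t]   # weights for token t (b, h, t are placeholder indices)
#     to_global_tokens = weights[:x]         # attention to the x globally-attended tokens
#     to_local_window = weights[x:]          # attention inside the sliding window; the weight of token t
#                                            # to itself sits at index w // 2 of this slice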
@dataclass
class LEDSeq2SeqModelOutput(ModelOutput):
"""
    Base class for model encoder-decoder outputs that also contain pre-computed hidden states that can speed up sequential
decoding.
Args:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the decoder of the model.
If :obj:`past_key_values` is used only the last hidden-state of the sequences of shape :obj:`(batch_size,
1, hidden_size)` is output.
past_key_values (:obj:`List[torch.FloatTensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
List of :obj:`torch.FloatTensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2,
            batch_size, num_heads, sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
used (see :obj:`past_key_values` input) to speed up sequential decoding.
decoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
decoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
encoder_last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
encoder_global_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, x)`, where ``x`` is the number of tokens with global attention mask.
Global attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
"""
last_hidden_state: torch.FloatTensor = None
past_key_values: Optional[List[torch.FloatTensor]] = None
decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_global_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class LEDSeq2SeqLMOutput(ModelOutput):
"""
Base class for sequence-to-sequence language models outputs.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Language modeling loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (:obj:`List[torch.FloatTensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
List of :obj:`torch.FloatTensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2,
            batch_size, num_heads, sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
used (see :obj:`past_key_values` input) to speed up sequential decoding.
decoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
decoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
encoder_last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
encoder_global_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, x)`, where ``x`` is the number of tokens with global attention mask.
Global attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
past_key_values: Optional[List[torch.FloatTensor]] = None
decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_global_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class LEDSeq2SeqSequenceClassifierOutput(ModelOutput):
"""
Base class for outputs of sequence-to-sequence sentence classification models.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
past_key_values (:obj:`List[torch.FloatTensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
List of :obj:`torch.FloatTensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2,
            batch_size, num_heads, sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
used (see :obj:`past_key_values` input) to speed up sequential decoding.
decoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
decoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
encoder_last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
encoder_global_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, x)`, where ``x`` is the number of tokens with global attention mask.
Global attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
past_key_values: Optional[List[torch.FloatTensor]] = None
decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_global_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class LEDSeq2SeqQuestionAnsweringModelOutput(ModelOutput):
"""
Base class for outputs of sequence-to-sequence question answering models.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
start_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`):
Span-start scores (before SoftMax).
end_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`):
Span-end scores (before SoftMax).
past_key_values (:obj:`List[torch.FloatTensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
List of :obj:`torch.FloatTensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2,
            batch_size, num_heads, sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
used (see :obj:`past_key_values` input) to speed up sequential decoding.
decoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
decoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
encoder_last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
encoder_global_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, x)`, where ``x`` is the number of tokens with global attention mask.
Global attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
"""
loss: Optional[torch.FloatTensor] = None
start_logits: torch.FloatTensor = None
end_logits: torch.FloatTensor = None
past_key_values: Optional[List[torch.FloatTensor]] = None
decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_global_attentions: Optional[Tuple[torch.FloatTensor]] = None
LED_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
    methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.LEDConfig`):
Model configuration class with all the parameters of the model. Initializing with a config file does not
load the weights associated with the model, only the configuration. Check out the
:meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
LED_GENERATION_EXAMPLE = r"""
Summarization example::
>>> from transformers import LEDTokenizer, LEDForConditionalGeneration, LEDConfig
>>> model = LEDForConditionalGeneration.from_pretrained('allenai/led-base-16384')
>>> tokenizer = LEDTokenizer.from_pretrained('allenai/led-base-16384')
>>> ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
>>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='pt')
>>> # Generate Summary
>>> summary_ids = model.generate(inputs['input_ids'], num_beams=4, max_length=5, early_stopping=True)
>>> print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])
"""
LED_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using :class:`~transformers.LEDTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Indices of decoder input sequence tokens in the vocabulary.
            Indices can be obtained using :class:`~transformers.LEDTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
LED uses the :obj:`eos_token_id` as the starting token for :obj:`decoder_input_ids` generation. If
:obj:`past_key_values` is used, optionally only the last :obj:`decoder_input_ids` have to be input (see
:obj:`past_key_values`).
decoder_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will
also be used by default.
If you want to change padding behavior, you should read :func:`modeling_led._prepare_decoder_inputs` and
modify to your needs. See diagram 1 in `the paper <https://arxiv.org/abs/1910.13461>`__ for more
information on the default strategy.
global_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to decide the attention given on each token, local attention or global attention for the encoder.
            Tokens with global attention attend to all other tokens, and all other tokens attend to them. This is
important for task-specific finetuning because it makes the model more flexible at representing the task.
For example, for classification, the <s> token should be given global attention. For QA, all question
tokens should also have global attention. Please refer to the `Longformer paper
<https://arxiv.org/abs/2004.05150>`__ for more details. Mask values selected in ``[0, 1]``:
- 0 for local attention (a sliding window attention),
- 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).
head_mask (:obj:`torch.Tensor` of shape :obj:`(encoder_layers, encoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
decoder_head_mask (:obj:`torch.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (:obj:`torch.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in ``[0,
1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
        encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`):
            Tuple consisting of (:obj:`last_hidden_state`, `optional`: :obj:`hidden_states`, `optional`:
            :obj:`attentions`). :obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length,
            hidden_size)` is a sequence of hidden-states at the output of the last layer of the encoder. Used in
            the cross-attention of the decoder.
past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]` of length :obj:`config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
decoder_inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`decoder_input_ids` you can choose to directly pass an embedded
representation. If :obj:`past_key_values` is used, optionally only the last :obj:`decoder_inputs_embeds`
have to be input (see :obj:`past_key_values`). This is useful if you want more control over how to convert
:obj:`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
If :obj:`decoder_input_ids` and :obj:`decoder_inputs_embeds` are both unset, :obj:`decoder_inputs_embeds`
takes the value of :obj:`inputs_embeds`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
class LEDEncoder(LEDPreTrainedModel):
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
:class:`LEDEncoderLayer`.
Args:
config: LEDConfig
embed_tokens (torch.nn.Embedding): output embedding
"""
def __init__(self, config: LEDConfig, embed_tokens: Optional[nn.Embedding] = None):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
embed_dim = config.d_model
self.padding_idx = config.pad_token_id
self.max_source_positions = config.max_encoder_position_embeddings
if isinstance(config.attention_window, int):
assert config.attention_window % 2 == 0, "`config.attention_window` has to be an even value"
assert config.attention_window > 0, "`config.attention_window` has to be positive"
config.attention_window = [config.attention_window] * config.num_hidden_layers # one value per layer
else:
assert len(config.attention_window) == config.num_hidden_layers, (
"`len(config.attention_window)` should equal `config.num_hidden_layers`. "
f"Expected {config.num_hidden_layers}, given {len(config.attention_window)}"
)
if embed_tokens is not None:
self.embed_tokens = embed_tokens
else:
self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
self.embed_positions = LEDLearnedPositionalEmbedding(
self.max_source_positions,
embed_dim,
)
self.layers = nn.ModuleList([LEDEncoderLayer(config, i) for i in range(config.encoder_layers)])
self.layernorm_embedding = nn.LayerNorm(embed_dim)
self.init_weights()
def _merge_to_attention_mask(self, attention_mask: torch.Tensor, global_attention_mask: torch.Tensor):
# longformer self attention expects attention mask to have 0 (no attn), 1 (local attn), 2 (global attn)
# (global_attention_mask + 1) => 1 for local attention, 2 for global attention
# => final attention_mask => 0 for no attention, 1 for local attention 2 for global attention
if attention_mask is not None:
attention_mask = attention_mask * (global_attention_mask + 1)
else:
# simply use `global_attention_mask` as `attention_mask`
# if no `attention_mask` is given
attention_mask = global_attention_mask + 1
return attention_mask
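    # Worked example for `_merge_to_attention_mask` (illustrative only):
    #     attention_mask        = [1, 1, 1, 0]   # 1 = real token, 0 = padding
    #     global_attention_mask = [1, 0, 0, 0]   # global attention on the first token
    #     merged mask           = [2, 1, 1, 0]   # 0 = no attention, 1 = local attention, 2 = global attention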
def _pad_to_window_size(
self,
input_ids: torch.Tensor,
attention_mask: torch.Tensor,
inputs_embeds: torch.Tensor,
pad_token_id: int,
):
"""A helper function to pad tokens and mask to work with implementation of Longformer self-attention."""
# padding
attention_window = (
self.config.attention_window
if isinstance(self.config.attention_window, int)
else max(self.config.attention_window)
)
assert attention_window % 2 == 0, f"`attention_window` should be an even value. Given {attention_window}"
input_shape = input_ids.shape if input_ids is not None else inputs_embeds.shape
batch_size, seq_len = input_shape[:2]
padding_len = (attention_window - seq_len % attention_window) % attention_window
if padding_len > 0:
logger.info(
f"Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of "
f"`config.attention_window`: {attention_window}"
)
if input_ids is not None:
input_ids = F.pad(input_ids, (0, padding_len), value=pad_token_id)
if inputs_embeds is not None:
input_ids_padding = inputs_embeds.new_full(
(batch_size, padding_len),
self.config.pad_token_id,
dtype=torch.long,
)
inputs_embeds_padding = self.embed_tokens(input_ids_padding)
inputs_embeds = torch.cat([inputs_embeds, inputs_embeds_padding], dim=-2)
attention_mask = F.pad(attention_mask, (0, padding_len), value=False) # no attention on the padding tokens
return padding_len, input_ids, attention_mask, inputs_embeds
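    # Worked example for `_pad_to_window_size` (illustrative only): with `attention_window=512` and
    # `seq_len=1000`, `padding_len = (512 - 1000 % 512) % 512 = 24`, so the inputs are padded to length 1024,
    # the next multiple of the attention window.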
def forward(
self,
input_ids=None,
attention_mask=None,
global_attention_mask=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using :class:`~transformers.LEDTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
global_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to decide the attention given on each token, local attention or global attention for the encoder.
                Tokens with global attention attend to all other tokens, and all other tokens attend to them. This is
important for task-specific finetuning because it makes the model more flexible at representing the
task. For example, for classification, the <s> token should be given global attention. For QA, all
question tokens should also have global attention. Please refer to the `Longformer paper
<https://arxiv.org/abs/2004.05150>`__ for more details. Mask values selected in ``[0, 1]``:
- 0 for local attention (a sliding window attention),
- 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).
head_mask (:obj:`torch.Tensor` of shape :obj:`(encoder_layers, encoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert :obj:`input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# check input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is None and inputs_embeds is None:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
# create default attention_mask
if attention_mask is None:
attention_mask = torch.ones(inputs_embeds.size()[:-1], device=inputs_embeds.device, dtype=torch.long)
# merge `global_attention_mask` and `attention_mask`
if global_attention_mask is not None:
attention_mask = self._merge_to_attention_mask(attention_mask, global_attention_mask)
# pad input if necessary
padding_len, input_ids, attention_mask, inputs_embeds = self._pad_to_window_size(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
pad_token_id=self.config.pad_token_id,
)
# retrieve input_shape
if input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
# convert attention_mask to float
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, seq_len]; 1 -> 0.0; 0 -> "-inf"
attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype)[:, 0, 0, :]
# get masking tensors
is_index_masked = attention_mask < 0
is_index_global_attn = attention_mask > 0
is_global_attn = is_index_global_attn.flatten().any().item()
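        # is_index_masked: positions with negative mask values (padding) that must not be attended to
        # is_index_global_attn: positions with positive mask values, i.e. tokens marked for global attention
        # is_global_attn: whether any token in the batch uses global attention at all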
embed_pos = self.embed_positions(input_shape)
hidden_states = inputs_embeds + embed_pos
hidden_states = self.layernorm_embedding(hidden_states)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
all_global_attentions = () if (output_attentions and is_global_attn) else None
# check if head_mask has a correct number of layers specified if desired
if head_mask is not None:
assert head_mask.size()[0] == (
len(self.layers)
), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop): # skip the layer
layer_outputs = (None, None, None)
else:
if getattr(self.config, "gradient_checkpointing", False) and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, is_global_attn, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(encoder_layer),
hidden_states,
attention_mask,
head_mask[idx] if head_mask is not None else None,
is_index_masked,
is_index_global_attn,
)
else:
layer_outputs = encoder_layer(
hidden_states,
attention_mask=attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
is_index_masked=is_index_masked,
is_index_global_attn=is_index_global_attn,
is_global_attn=is_global_attn,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
                # bsz x seq_len x num_attn_heads x (num_global_attn + attention_window_len + 1) => bsz x num_attn_heads x seq_len x (num_global_attn + attention_window_len + 1)
all_attentions = all_attentions + (layer_outputs[1].transpose(1, 2),)
if is_global_attn:
                    # bsz x num_attn_heads x num_global_attn x seq_len => bsz x num_attn_heads x seq_len x num_global_attn
all_global_attentions = all_global_attentions + (layer_outputs[2].transpose(2, 3),)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
# undo padding
if padding_len > 0:
# unpad `hidden_states` because the calling function is expecting a length == input_ids.size(1)
hidden_states = hidden_states[:, :-padding_len]
if not return_dict:
return tuple(
v for v in [hidden_states, encoder_states, all_attentions, all_global_attentions] if v is not None
)
return LEDEncoderBaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=encoder_states,
attentions=all_attentions,
global_attentions=all_global_attentions,
)
class LEDDecoder(LEDPreTrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a :class:`LEDDecoderLayer`
Args:
config: LEDConfig
embed_tokens (torch.nn.Embedding): output embedding
"""
def __init__(self, config: LEDConfig, embed_tokens: Optional[nn.Embedding] = None):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.padding_idx = config.pad_token_id
self.max_target_positions = config.max_decoder_position_embeddings
if embed_tokens is not None:
self.embed_tokens = embed_tokens
else:
self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
self.embed_positions = LEDLearnedPositionalEmbedding(
self.max_target_positions,
config.d_model,
)
self.layers = nn.ModuleList([LEDDecoderLayer(config) for _ in range(config.decoder_layers)])
self.layernorm_embedding = nn.LayerNorm(config.d_model)
self.init_weights()
def forward(
self,
input_ids=None,
attention_mask=None,
global_attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
head_mask=None,
cross_attn_head_mask=None,
past_key_values=None,
inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using :class:`~transformers.LEDTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
global_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to decide the attention given on each token, local attention or global attention. Tokens with
                global attention attend to all other tokens, and all other tokens attend to them. This is important
for task-specific finetuning because it makes the model more flexible at representing the task. For
example, for classification, the <s> token should be given global attention. For QA, all question
tokens should also have global attention. Please refer to the `Longformer paper
<https://arxiv.org/abs/2004.05150>`__ for more details. Mask values selected in ``[0, 1]``:
- 0 for local attention (a sliding window attention),
- 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, encoder_sequence_length)`, `optional`):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
head_mask (:obj:`torch.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (:obj:`torch.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]` of length :obj:`config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last
:obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of
                shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size,
sequence_length)`.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert :obj:`input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
# create causal mask
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
combined_attention_mask = None
if input_shape[-1] > 1:
combined_attention_mask = _make_causal_mask(
input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length
).to(self.device)
if attention_mask is not None and combined_attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
combined_attention_mask = combined_attention_mask + _expand_mask(
attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
)
# expand encoder attention mask
if encoder_hidden_states is not None and encoder_attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
# embed positions
positions = self.embed_positions(input_shape, past_key_values_length)
hidden_states = inputs_embeds + positions
hidden_states = self.layernorm_embedding(hidden_states)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if output_attentions else None
next_decoder_cache = () if use_cache else None
# check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
if attn_mask is not None:
assert attn_mask.size()[0] == (
len(self.layers)
                ), f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {attn_mask.size()[0]}."
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
if output_hidden_states:
all_hidden_states += (hidden_states,)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop):
continue
past_key_value = past_key_values[idx] if past_key_values is not None else None
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
# None for past_key_value
return module(*inputs, output_attentions, use_cache)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(decoder_layer),
hidden_states,
combined_attention_mask,
encoder_hidden_states,
encoder_attention_mask,
head_mask[idx] if head_mask is not None else None,
cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
None,
)
else:
layer_outputs = decoder_layer(
hidden_states,
attention_mask=combined_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
cross_attn_layer_head_mask=(
cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
),
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
)
hidden_states = layer_outputs[0]
if use_cache:
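                # layer_outputs is (hidden_states, [self_attn_weights, cross_attn_weights,] present_key_value),
                # so the cache entry sits at index 3 when attentions are returned and at index 1 otherwise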
next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
if output_attentions:
all_self_attns += (layer_outputs[1],)
all_cross_attentions += (layer_outputs[2],)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
next_cache = next_decoder_cache if use_cache else None
if not return_dict:
return tuple(
v
for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_cache,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attentions,
)
@add_start_docstrings(
"The bare LED Model outputting raw hidden-states without any specific head on top.",
LED_START_DOCSTRING,
)
class LEDModel(LEDPreTrainedModel):
def __init__(self, config: LEDConfig):
super().__init__(config)
padding_idx, vocab_size = config.pad_token_id, config.vocab_size
self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
self.encoder = LEDEncoder(config, self.shared)
self.decoder = LEDDecoder(config, self.shared)
self.init_weights()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, value):
self.shared = value
self.encoder.embed_tokens = self.shared
self.decoder.embed_tokens = self.shared
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
@add_start_docstrings_to_model_forward(LED_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=Seq2SeqModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs=None,
global_attention_mask=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
global_attention_mask=global_attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
        # If the user passed a tuple for encoder_outputs, we wrap it in a LEDEncoderBaseModelOutput when return_dict=True
elif return_dict and not isinstance(encoder_outputs, LEDEncoderBaseModelOutput):
encoder_outputs = LEDEncoderBaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
global_attentions=encoder_outputs[3] if len(encoder_outputs) > 3 else None,
)
        # decoder outputs consist of (dec_features, past_key_value, dec_hidden, dec_attn)
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
encoder_hidden_states=encoder_outputs[0],
encoder_attention_mask=attention_mask,
head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
past_key_values=past_key_values,
inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return LEDSeq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
encoder_global_attentions=encoder_outputs.global_attentions,
)
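# A minimal usage sketch for LEDModel (illustrative only, assuming the public 'allenai/led-base-16384'
# checkpoint):
#
#     tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
#     model = LEDModel.from_pretrained("allenai/led-base-16384")
#     inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
#     outputs = model(input_ids=inputs["input_ids"], decoder_input_ids=inputs["input_ids"])
#     outputs.last_hidden_state  # (batch_size, decoder_seq_len, d_model)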
@add_start_docstrings(
"The LED Model with a language modeling head. Can be used for summarization.", LED_START_DOCSTRING
)
class LEDForConditionalGeneration(LEDPreTrainedModel):
base_model_prefix = "led"
_keys_to_ignore_on_load_missing = [
r"final_logits_bias",
r"encoder\.version",
r"decoder\.version",
r"lm_head\.weight",
]
def __init__(self, config: LEDConfig):
super().__init__(config)
self.led = LEDModel(config)
self.register_buffer("final_logits_bias", torch.zeros((1, self.led.shared.num_embeddings)))
self.lm_head = nn.Linear(config.d_model, self.led.shared.num_embeddings, bias=False)
self.init_weights()
def get_encoder(self):
return self.led.get_encoder()
def get_decoder(self):
return self.led.get_decoder()
def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:
new_embeddings = super().resize_token_embeddings(new_num_tokens)
self._resize_final_logits_bias(new_num_tokens)
return new_embeddings
def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
old_num_tokens = self.final_logits_bias.shape[-1]
if new_num_tokens <= old_num_tokens:
new_bias = self.final_logits_bias[:, :new_num_tokens]
else:
extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
self.register_buffer("final_logits_bias", new_bias)
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
@add_start_docstrings_to_model_forward(LED_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
@add_end_docstrings(LED_GENERATION_EXAMPLE)
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs=None,
global_attention_mask=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should either be in ``[0, ...,
config.vocab_size]`` or -100 (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.
Returns:
Conditional generation example::
>>> from transformers import LEDTokenizer, LEDForConditionalGeneration
>>> tokenizer = LEDTokenizer.from_pretrained('allenai/led-base-16384')
>>> TXT = "My friends are <mask> but they eat too many carbs."
>>> model = LEDForConditionalGeneration.from_pretrained('allenai/led-base-16384')
>>> input_ids = tokenizer([TXT], return_tensors='pt')['input_ids']
>>> logits = model(input_ids).logits
>>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()
>>> probs = logits[0, masked_index].softmax(dim=0)
>>> values, predictions = probs.topk(5)
>>> tokenizer.decode(predictions).split()
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
if decoder_input_ids is None:
decoder_input_ids = shift_tokens_right(
labels, self.config.pad_token_id, self.config.decoder_start_token_id
)
outputs = self.led(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
encoder_outputs=encoder_outputs,
global_attention_mask=global_attention_mask,
head_mask=head_mask,
decoder_head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (lm_logits,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return LEDSeq2SeqLMOutput(
loss=masked_lm_loss,
logits=lm_logits,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
encoder_global_attentions=outputs.encoder_global_attentions,
)
def prepare_inputs_for_generation(
self,
decoder_input_ids,
past=None,
attention_mask=None,
head_mask=None,
use_cache=None,
encoder_outputs=None,
**kwargs,
):
# cut decoder_input_ids if past is used
if past is not None:
decoder_input_ids = decoder_input_ids[:, -1:]
return {
"input_ids": None, # encoder_outputs is defined. input_ids not needed
"encoder_outputs": encoder_outputs,
"past_key_values": past,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"head_mask": head_mask,
"use_cache": use_cache, # change this to avoid caching (presumably for debugging)
}
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
@staticmethod
def _reorder_cache(past, beam_idx):
reordered_past = ()
for layer_past in past:
# cached cross_attention states don't have to be reordered -> they are always the same
reordered_past += (
tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],
)
return reordered_past
@add_start_docstrings(
"""
    LED model with a sequence classification head on top (a linear layer on top of the pooled output), e.g. for GLUE
    tasks.
""",
LED_START_DOCSTRING,
)
class LEDForSequenceClassification(LEDPreTrainedModel):
def __init__(self, config: LEDConfig, **kwargs):
super().__init__(config, **kwargs)
self.led = LEDModel(config)
self.classification_head = LEDClassificationHead(
config.d_model,
config.d_model,
config.num_labels,
config.classifier_dropout,
)
self.led._init_weights(self.classification_head.dense)
self.led._init_weights(self.classification_head.out_proj)
@add_start_docstrings_to_model_forward(LED_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=Seq2SeqSequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs=None,
global_attention_mask=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
if input_ids is None and inputs_embeds is not None:
raise NotImplementedError(
f"Passing input embeddings is currently not supported for {self.__class__.__name__}"
)
outputs = self.led(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
global_attention_mask=global_attention_mask,
head_mask=head_mask,
decoder_head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
encoder_outputs=encoder_outputs,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0] # last hidden state
eos_mask = input_ids.eq(self.config.eos_token_id)
if len(torch.unique(eos_mask.sum(1))) > 1:
raise ValueError("All examples must have the same number of <eos> tokens.")
sentence_representation = hidden_states[eos_mask, :].view(hidden_states.size(0), -1, hidden_states.size(-1))[
:, -1, :
]
logits = self.classification_head(sentence_representation)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return LEDSeq2SeqSequenceClassifierOutput(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
encoder_global_attentions=outputs.encoder_global_attentions,
)
@add_start_docstrings(
"""
LED Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer
on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
LED_START_DOCSTRING,
)
class LEDForQuestionAnswering(LEDPreTrainedModel):
def __init__(self, config):
super().__init__(config)
config.num_labels = 2
self.num_labels = config.num_labels
self.led = LEDModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.led._init_weights(self.qa_outputs)
@add_start_docstrings_to_model_forward(LED_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=Seq2SeqQuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs=None,
global_attention_mask=None,
start_positions=None,
end_positions=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if start_positions is not None and end_positions is not None:
use_cache = False
outputs = self.led(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
global_attention_mask=global_attention_mask,
head_mask=head_mask,
decoder_head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
encoder_outputs=encoder_outputs,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
total_loss = None
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, splitting adds an extra dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (
start_logits,
end_logits,
) + outputs[1:]
return ((total_loss,) + output) if total_loss is not None else output
return LEDSeq2SeqQuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
encoder_global_attentions=outputs.encoder_global_attentions,
)
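# --- Hedged usage sketch (not part of the original modeling file) ---
# Minimal summarization example for LEDForConditionalGeneration, assuming the
# public 'allenai/led-base-16384' checkpoint; global attention is placed on the
# first token, as is customary for LED.
if __name__ == "__main__":
    from transformers import LEDTokenizer
    tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
    model = LEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
    inputs = tokenizer("A very long article to summarize ...", return_tensors="pt")
    global_attention_mask = torch.zeros_like(inputs["input_ids"])
    global_attention_mask[:, 0] = 1  # global attention on the <s> token
    summary_ids = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        global_attention_mask=global_attention_mask,
        num_beams=2,
        max_length=32,
    )
    print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True))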
| [
"torch.nn.Linear",
"torch.cat",
"torch.einsum",
"torch.isnan",
"torch.finfo",
"torch.bmm",
"torch.masked_fill",
"torch.nn.functional.pad",
"torch.nn.CrossEntropyLoss",
"torch.nn.LayerNorm",
"torch.tensor",
"torch.zeros",
"torch.nn.functional.dropout",
"torch.clamp",
"torch.isinf",
"torch.nn.functional.softmax",
"torch.nn.Dropout",
"torch.arange",
"torch.tanh",
"torch.nn.Embedding"
] | 1.0 | syskn/transformers | fafbd2574cb12b987099f69b3821814042d8f4ce |
1.3 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, Optional, Sequence, Tuple, Union
import torch
from torch import Tensor, tensor
from typing_extensions import Literal
from torchmetrics.functional.image.psnr import _psnr_compute, _psnr_update
from torchmetrics.metric import Metric
from torchmetrics.utilities import rank_zero_warn
class PeakSignalNoiseRatio(Metric):
r"""
    Computes `Peak Signal-to-Noise Ratio`_ (PSNR):
.. math:: \text{PSNR}(I, J) = 10 * \log_{10} \left(\frac{\max(I)^2}{\text{MSE}(I, J)}\right)
Where :math:`\text{MSE}` denotes the `mean-squared-error`_ function.
Args:
data_range:
the range of the data. If None, it is determined from the data (max - min).
The ``data_range`` must be given when ``dim`` is not None.
base: a base of a logarithm to use.
reduction: a method to reduce metric score over labels.
- ``'elementwise_mean'``: takes the mean (default)
- ``'sum'``: takes the sum
- ``'none'`` or ``None``: no reduction will be applied
dim:
Dimensions to reduce PSNR scores over, provided as either an integer or a list of integers. Default is
None meaning scores will be reduced across all dimensions and all batches.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ValueError:
If ``dim`` is not ``None`` and ``data_range`` is not given.
Example:
>>> from torchmetrics import PeakSignalNoiseRatio
>>> psnr = PeakSignalNoiseRatio()
>>> preds = torch.tensor([[0.0, 1.0], [2.0, 3.0]])
>>> target = torch.tensor([[3.0, 2.0], [1.0, 0.0]])
>>> psnr(preds, target)
tensor(2.5527)
.. note::
        Half precision is only supported on GPU for this metric
"""
is_differentiable: bool = True
higher_is_better: bool = True
full_state_update: bool = False
min_target: Tensor
max_target: Tensor
def __init__(
self,
data_range: Optional[float] = None,
base: float = 10.0,
reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
dim: Optional[Union[int, Tuple[int, ...]]] = None,
**kwargs: Dict[str, Any],
) -> None:
super().__init__(**kwargs)
if dim is None and reduction != "elementwise_mean":
rank_zero_warn(f"The `reduction={reduction}` will not have any effect when `dim` is None.")
if dim is None:
self.add_state("sum_squared_error", default=tensor(0.0), dist_reduce_fx="sum")
self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
else:
self.add_state("sum_squared_error", default=[], dist_reduce_fx="cat")
self.add_state("total", default=[], dist_reduce_fx="cat")
if data_range is None:
if dim is not None:
# Maybe we could use `torch.amax(target, dim=dim) - torch.amin(target, dim=dim)` in PyTorch 1.7 to
# calculate `data_range` in the future.
raise ValueError("The `data_range` must be given when `dim` is not None.")
self.data_range = None
self.add_state("min_target", default=tensor(0.0), dist_reduce_fx=torch.min)
self.add_state("max_target", default=tensor(0.0), dist_reduce_fx=torch.max)
else:
self.add_state("data_range", default=tensor(float(data_range)), dist_reduce_fx="mean")
self.base = base
self.reduction = reduction
self.dim = tuple(dim) if isinstance(dim, Sequence) else dim
def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore
"""Update state with predictions and targets.
Args:
preds: Predictions from model
target: Ground truth values
"""
sum_squared_error, n_obs = _psnr_update(preds, target, dim=self.dim)
if self.dim is None:
if self.data_range is None:
# keep track of min and max target values
self.min_target = min(target.min(), self.min_target)
self.max_target = max(target.max(), self.max_target)
self.sum_squared_error += sum_squared_error
self.total += n_obs
else:
self.sum_squared_error.append(sum_squared_error)
self.total.append(n_obs)
def compute(self) -> Tensor:
"""Compute peak signal-to-noise ratio over state."""
if self.data_range is not None:
data_range = self.data_range
else:
data_range = self.max_target - self.min_target
if self.dim is None:
sum_squared_error = self.sum_squared_error
total = self.total
else:
sum_squared_error = torch.cat([values.flatten() for values in self.sum_squared_error])
total = torch.cat([values.flatten() for values in self.total])
return _psnr_compute(sum_squared_error, total, data_range, base=self.base, reduction=self.reduction)
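# --- Hedged usage sketch (not part of the original torchmetrics source) ---
# When reducing over specific dimensions, ``data_range`` must be supplied; with
# ``reduction="none"`` the metric then yields one PSNR value per batch element.
if __name__ == "__main__":
    psnr_per_sample = PeakSignalNoiseRatio(data_range=1.0, reduction="none", dim=(1, 2, 3))
    preds = torch.rand(4, 3, 16, 16)
    target = torch.rand(4, 3, 16, 16)
    print(psnr_per_sample(preds, target))  # tensor with 4 PSNR values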
| [
"torch.tensor"
] | 1.3.1 | aaronzs/metrics | 30019851186f2e224504ec76e87cc7f5170b7166 |
1.2 | #!/usr/bin/env python
# coding: utf-8
import yaml
import torch
import argparse
import numpy as np
# For reproducibility; commenting these out may speed up training
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Arguments
parser = argparse.ArgumentParser(description='Training E2E asr.')
parser.add_argument('--config', type=str, help='Path to experiment config.')
parser.add_argument('--name', default=None, type=str, help='Name for logging.')
parser.add_argument('--logdir', default='log/', type=str, help='Logging path.', required=False)
parser.add_argument('--ckpdir', default='ckpt/', type=str, help='Checkpoint path.', required=False)
parser.add_argument('--outdir', default='result/', type=str, help='Decode output path.', required=False)
parser.add_argument('--load', default=None, type=str, help='Load pre-trained model (for training only)', required=False)
parser.add_argument('--seed', default=0, type=int, help='Random seed for reproducible results.', required=False)
parser.add_argument('--cudnn-ctc', action='store_true', help='Switches CTC backend from torch to cudnn')
parser.add_argument('--njobs', default=4, type=int, help='Number of threads for dataloader/decoding.', required=False)
parser.add_argument('--cpu', action='store_true', help='Disable GPU training.')
parser.add_argument('--no-pin', action='store_true', help='Disable pin-memory for dataloader')
parser.add_argument('--test', action='store_true', help='Test the model.')
parser.add_argument('--no-msg', action='store_true', help='Hide all messages.')
parser.add_argument('--lm', action='store_true', help='Option for training RNNLM.')
parser.add_argument('--amp', action='store_true', help='Option to enable AMP.')
parser.add_argument('--reserve_gpu', default=0, type=float, help='Option to reserve GPU ram for training.')
parser.add_argument('--jit', action='store_true', help='Option for enabling jit in pytorch. (feature in development)')
parser.add_argument('--cuda', default=0, type=int, help='Choose which gpu to use.')
paras = parser.parse_args()
setattr(paras,'gpu',not paras.cpu)
setattr(paras,'pin_memory',not paras.no_pin)
setattr(paras,'verbose',not paras.no_msg)
config = yaml.load(open(paras.config,'r'), Loader=yaml.FullLoader)
print('[INFO] Using config {}'.format(paras.config))
np.random.seed(paras.seed)
torch.manual_seed(paras.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(paras.seed)
# print('There are ', torch.cuda.device_count(), ' device(s) available')
# print('Using device cuda:', str(paras.cuda))
# Hack to reserve GPU RAM just in case of OOM later on the server
if paras.gpu and paras.reserve_gpu>0:
buff = torch.randn(int(paras.reserve_gpu*1e9//4)).to(torch.device('cuda:' + str(paras.cuda)))
del buff
if paras.lm:
# Train RNNLM
from bin.train_lm import Solver
mode = 'train'
else:
if paras.test:
# Test ASR
assert paras.load is None, 'Load option is mutually exclusive to --test'
from bin.test_asr2 import Solver
mode = 'test'
else:
# Train ASR
from bin.train_asr import Solver
mode = 'train'
solver = Solver(config,paras,mode)
solver.load_data()
solver.set_model()
solver.exec()
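# --- Hedged usage note (not part of the original script) ---
# A typical invocation might look like the following; the script name, config
# path and flag values are hypothetical and depend on the repository layout:
#   python3 main.py --config config/asr_example.yaml --name my_run --njobs 8
#   python3 main.py --config config/asr_example.yaml --test --njobs 8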
| [
"torch.manual_seed",
"torch.cuda.is_available",
"torch.cuda.manual_seed_all"
] | 1.2.0 | DanielLin94144/End-to-End-jointCTC-Attention-ASR | 2b8900f1f397d65d0e86972f7379bb3dfeb7c4ea |
1.8 | # This script is borrowed from https://github.com/LynnHo/DCGAN-LSGAN-WGAN-GP-DRAGAN-Pytorch
# which is also used by Durall et al. (https://ieeexplore.ieee.org/document/9157579)
import torch
def get_gan_losses_fn():
bce = torch.nn.BCEWithLogitsLoss()
def d_loss_fn(r_logit, f_logit):
r_loss = bce(r_logit, torch.ones_like(r_logit))
f_loss = bce(f_logit, torch.zeros_like(f_logit))
return r_loss, f_loss
def g_loss_fn(f_logit):
f_loss = bce(f_logit, torch.ones_like(f_logit))
return f_loss
return d_loss_fn, g_loss_fn
def get_hinge_v1_losses_fn():
def d_loss_fn(r_logit, f_logit):
r_loss = torch.max(1 - r_logit, torch.zeros_like(r_logit)).mean()
f_loss = torch.max(1 + f_logit, torch.zeros_like(f_logit)).mean()
return r_loss, f_loss
def g_loss_fn(f_logit):
f_loss = torch.max(1 - f_logit, torch.zeros_like(f_logit)).mean()
return f_loss
return d_loss_fn, g_loss_fn
def get_hinge_v2_losses_fn():
def d_loss_fn(r_logit, f_logit):
r_loss = torch.max(1 - r_logit, torch.zeros_like(r_logit)).mean()
f_loss = torch.max(1 + f_logit, torch.zeros_like(f_logit)).mean()
return r_loss, f_loss
def g_loss_fn(f_logit):
f_loss = -f_logit.mean()
return f_loss
return d_loss_fn, g_loss_fn
def get_lsgan_losses_fn():
mse = torch.nn.MSELoss()
def d_loss_fn(r_logit, f_logit):
r_loss = mse(r_logit, torch.ones_like(r_logit))
f_loss = mse(f_logit, torch.zeros_like(f_logit))
return r_loss, f_loss
def g_loss_fn(f_logit):
f_loss = mse(f_logit, torch.ones_like(f_logit))
return f_loss
return d_loss_fn, g_loss_fn
def get_wgan_losses_fn():
def d_loss_fn(r_logit, f_logit):
r_loss = -r_logit.mean()
f_loss = f_logit.mean()
return r_loss, f_loss
def g_loss_fn(f_logit):
f_loss = -f_logit.mean()
return f_loss
return d_loss_fn, g_loss_fn
def get_adversarial_losses_fn(mode):
if mode == 'gan':
return get_gan_losses_fn()
elif mode == 'hinge_v1':
return get_hinge_v1_losses_fn()
elif mode == 'hinge_v2':
return get_hinge_v2_losses_fn()
elif mode == 'lsgan':
return get_lsgan_losses_fn()
elif mode == 'wgan':
return get_wgan_losses_fn()
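# --- Hedged usage sketch (not part of the original script) ---
# Pick a discriminator/generator loss pair by name and apply it to logits.
if __name__ == "__main__":
    d_loss_fn, g_loss_fn = get_adversarial_losses_fn('hinge_v2')
    r_logit = torch.randn(8, 1)  # discriminator logits for real samples
    f_logit = torch.randn(8, 1)  # discriminator logits for fake samples
    r_loss, f_loss = d_loss_fn(r_logit, f_logit)
    d_loss = r_loss + f_loss
    g_loss = g_loss_fn(f_logit)
    print(d_loss.item(), g_loss.item())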
| [
"torch.zeros_like",
"torch.ones_like",
"torch.nn.MSELoss",
"torch.nn.BCEWithLogitsLoss"
] | 1.8.0 | sutd-visual-computing-group/Fourier-Discrepancies-CNN-Detection | ff313150035935c2c81167291ed82b7f13064f05 |
1.9 | import json
import os
import argparse
import scipy.stats as stats
import numpy as np
import pandas as pd
import torch
import random
import sys
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import time
import torch.utils.data as data
from sklearn.model_selection import StratifiedKFold
from collections import Counter
from tqdm import tqdm
from pathlib import Path
from pyhac.model import HACModel
from pyhac.utils.key_points import W_LIST_POSE, W2I_POSE, \
W_LIST_LEFT_HAND, W2I_LEFT_HAND, \
W_LIST_RIGHT_HAND, W2I_RIGHT_HAND, \
X_COLS, Y_COLS, \
ALL_XY_COLS, HAND_XY_COLS, \
LEFT_HAND_X_COLS, LEFT_HAND_Y_COLS, \
RIGHT_HAND_X_COLS, RIGHT_HAND_Y_COLS
from pyhac.utils.normalizer import normalize_data, normalize_hand_data
from pyhac.utils.data_augmentation import data_augmentation
class Dataset(torch.utils.data.Dataset):
def __init__(self, X, y):
self.X = X
self.y = y
assert self.X.shape[0] == self.y.shape[0]
def __len__(self):
return self.y.shape[0]
def __getitem__(self, idx):
return self.X[idx,:], self.y[idx]
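# --- Hedged usage note (not part of the original script) ---
# The Dataset above simply wraps pre-extracted feature matrices, e.g.
#   ds = Dataset(np.zeros((16, 150)), np.zeros(16, dtype=np.int64))
#   loader = torch.utils.data.DataLoader(ds, batch_size=8, shuffle=True)
# where the feature width (150 here) is illustrative only.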
def train(model, train_loader, val_loader, optimizer,
loss_fn, device, num_epochs, model_dir, actions,
target_columns_x):
model.train()
accs = []
losses = []
val_accs = []
val_losses = []
best_val_acc = 0.0
for epoch in tqdm(range(0, num_epochs)):
epoch_loss = 0.0
epoch_acc = 0.0
count = 0
for x, y in train_loader:
x = x.view(-1, model.in_channels, x.shape[1]//model.in_channels, 1).to(device).float()
y = y.to(device)
optimizer.zero_grad()
pred = model(x).reshape((y.shape[0], -1))
loss = loss_fn(pred, y)
epoch_loss += loss.item()
loss.backward()
epoch_acc += (pred.argmax(axis=1, keepdim=True).squeeze() == y).sum().item()
optimizer.step()
count += x.size()[0]
epoch_loss /= len(train_loader)
epoch_acc /= count
losses.append(epoch_loss)
accs.append(epoch_acc)
print("epoch loss:", epoch_loss)
print("acc:", epoch_acc)
if val_loader:
val_loss, val_acc = evaluate(model, val_loader, loss_fn, device)
val_losses.append(val_loss)
val_accs.append(val_acc)
if val_loader and (val_acc > best_val_acc):
best_val_acc = val_acc
filename = "best_model.pth"
Path(model_dir).mkdir(parents=True, exist_ok=True)
model_path = os.path.join(model_dir, filename)
print("save model to", model_path)
model_data = {
"model": model,
'epoch': epoch,
'optimizer': optimizer.state_dict(),
'actions': actions,
'target_columns': target_columns_x
}
torch.save(model_data, model_path)
return losses, accs, val_losses, val_accs
def evaluate(model, val_loader, loss_fn, device):
model.eval()
val_loss = 0.0
val_acc = 0.0
count = 0
for x, y in val_loader:
x = x.view(-1, model.in_channels, x.shape[1]//model.in_channels, 1).to(device).float()
y = y.to(device)
pred = model(x).reshape((y.shape[0], -1))
loss = loss_fn(pred, y)
val_loss += loss.item()
val_acc += (pred.argmax(axis=1, keepdim=True).squeeze() == y).sum().item()
count += x.size()[0]
val_loss /= len(val_loader)
val_acc /= count
print("val loss:", val_loss)
print("val acc:", val_acc)
return val_loss, val_acc
if __name__ == "__main__":
torch.random.manual_seed(5566)
np.random.seed(5566)
random.seed(5566)
parser = argparse.ArgumentParser()
parser.add_argument("--conf", help="a config file path in conf/action_sets")
parser.add_argument("--model_name", help="model name (e.g., mouse_control)")
parser.add_argument("--in_channels", type=int, default=3)
args = parser.parse_args()
with open(args.conf) as f:
conf_json = json.load(f)
data_path = os.path.join("data", "actions")
model_target = conf_json["type"]
actions = conf_json["actions"]
if model_target == "actions":
mode = "pose_hand"
target_columns_x = ALL_XY_COLS.copy()
ALL_XY_COLS += ["image_name", "label"] # keep label
elif model_target == "gesture_only":
mode = "hand"
target_columns_x = HAND_XY_COLS.copy()
HAND_XY_COLS += ["image_name", "label"] # keep label
else:
        raise RuntimeError(f"Unsupported model target: {model_target}")
dfs = []
for idx, action in enumerate(actions):
file_path = os.path.join(data_path, action, "data.csv")
print(file_path)
df = pd.read_csv(file_path)
df.label = idx
dfs.append(df)
df_data = pd.concat(dfs)
df_data = df_data.reset_index(drop=True)
if model_target == "actions":
df_data = normalize_data(df_data)
if model_target == "gesture_only":
df_data = normalize_hand_data(df_data)
device = "cuda" if torch.cuda.is_available() else "cpu"
in_channels = args.in_channels
hop = 2
batch_size = 8
num_epochs = 100
num_class = len(actions)
skf = StratifiedKFold(n_splits=5)
model_dir = os.path.join("pyhac", "trained_model", "gcn", args.model_name)
k_fold_losses = []
k_fold_accs = []
k_fold_val_losses = []
k_fold_val_accs = []
count = 0
for train_index, test_index in skf.split(df_data[target_columns_x].values, df_data.label.values):
if count <= 1:
count += 1
continue
df_train = data_augmentation(df_data.iloc[train_index])
X = df_train[target_columns_x].values
y = df_train["label"].values
val_X = df_data[target_columns_x].values[test_index]
val_y = df_data["label"].values[test_index]
dataset = Dataset(X, y)
val_dataset = Dataset(val_X, val_y)
train_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, shuffle=False)
model = HACModel(in_channels, num_class, hop, mode).to('cuda')
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
losses, accs, val_losses, val_accs = train(model, train_loader, val_loader, optimizer,
loss_fn, device, num_epochs, model_dir, actions,
target_columns_x)
k_fold_losses.append(losses)
k_fold_accs.append(accs)
k_fold_val_losses.append(val_losses)
k_fold_val_accs.append(val_accs)
        # only train once when generating a model for real usage
break | [
"torch.save",
"torch.random.manual_seed",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.nn.CrossEntropyLoss"
] | 1.9.0 | zroger49/hac | 5905369344c985d5293d572a610c82308306e385 |
1.3 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import torch
from classy_vision.generic.util import is_pos_int
def add_generic_args(parser):
"""
Adds generic command-line arguments for convnet training / testing to parser.
"""
parser.add_argument(
"--config_file", type=str, help="path to config file for model", required=True
)
parser.add_argument(
"--device",
default=None,
type=str,
help="device to use: either 'cpu' or 'gpu'. If unspecified, will use GPU when available and CPU otherwise.",
)
parser.add_argument(
"--num_workers",
default=4,
type=int,
help="number of dataloading workers (default = 4)",
)
parser.add_argument(
"--checkpoint_folder",
default="",
type=str,
help="""folder to use for checkpoints:
epochal checkpoints are stored as model_<epoch>.torch,
latest epoch checkpoint is at checkpoint.torch""",
)
parser.add_argument(
"--pretrained_checkpoint_folder",
default="",
type=str,
help="""folder to use for pre-trained checkpoints:
epochal checkpoints are stored as model_<epoch>.torch,
latest epoch checkpoint is at checkpoint.torch,
checkpoint is used for fine-tuning task, and it will
not resume training from the checkpoint""",
)
parser.add_argument(
"--checkpoint_period",
default=1,
type=int,
help="""Checkpoint every x phases (default 1)""",
)
parser.add_argument(
"--show_progress",
default=False,
action="store_true",
help="shows progress bar during training / testing",
)
parser.add_argument(
"--skip_tensorboard",
default=False,
action="store_true",
help="do not perform tensorboard visualization",
)
parser.add_argument(
"--visdom_server",
default="",
type=str,
help="visdom server to use (default None)",
)
parser.add_argument(
"--visdom_port",
default=8097,
type=int,
help="port of visdom server (default = 8097)",
)
parser.add_argument(
"--profiler",
default=False,
action="store_true",
help="specify this argument to profile training code",
)
parser.add_argument(
"--debug",
default=False,
action="store_true",
help="specify this argument for debugging mode",
)
parser.add_argument(
"--ignore_checkpoint_config",
default=False,
action="store_true",
help="""specify this argument to ignore
the compatibility of the config (or lack of config) attached
to the checkpoint; this will allow mismatches between
the training specified in the config and the
actual training of the model""",
)
parser.add_argument(
"--log_freq",
default=5,
type=int,
help="Logging frequency for LossLrMeterLoggingHook (default 5)",
)
parser.add_argument(
"--image_backend",
default="PIL",
type=str,
help="torchvision image decoder backend (PIL or accimage). Default PIL",
)
parser.add_argument(
"--video_backend",
default="pyav",
type=str,
help="torchvision video decoder backend (pyav or video_reader). Default pyav",
)
parser.add_argument(
"--distributed_backend",
default="none",
type=str,
help="""Distributed backend: either 'none' (for non-distributed runs)
or 'ddp' (for distributed runs). Default none.""",
)
return parser
def check_generic_args(args):
"""
Perform assertions on generic command-line arguments.
"""
# check types and values:
assert is_pos_int(args.num_workers), "incorrect number of workers"
assert is_pos_int(args.visdom_port), "incorrect visdom port"
assert (
args.device is None or args.device == "cpu" or args.device == "gpu"
), "unknown device"
# check that CUDA is available:
if args.device == "gpu":
assert torch.cuda.is_available(), "CUDA required to train on GPUs"
# create checkpoint folder if it does not exist:
if args.checkpoint_folder != "" and not os.path.exists(args.checkpoint_folder):
os.makedirs(args.checkpoint_folder, exist_ok=True)
assert os.path.exists(args.checkpoint_folder), (
"could not create folder %s" % args.checkpoint_folder
)
# when in debugging mode, enter debugger upon error:
if args.debug:
import sys
from classy_vision.generic.debug import debug_info
sys.excepthook = debug_info
# check visdom server name:
if args.visdom_server != "":
if args.visdom_server.startswith("https://"):
print("WARNING: Visdom does not work over HTTPS.")
args.visdom_server = args.visdom_server[8:]
if not args.visdom_server.startswith("http://"):
args.visdom_server = "http://%s" % args.visdom_server
# return input arguments:
return args
def get_parser():
"""
Return a standard command-line parser.
"""
parser = argparse.ArgumentParser(
description="""Start a Classy Vision training job.
This can be used for training on your local machine, using CPU or GPU, and
for distributed training. This script also supports Tensorboard, Visdom and
checkpointing."""
)
parser = add_generic_args(parser)
return parser
def parse_train_arguments(parser=None):
"""
Assert and parse the command-line arguments of a given (or default) parser.
"""
# set input arguments:
if parser is None:
parser = get_parser()
# parse input arguments:
args = parser.parse_args()
# assertions:
args = check_generic_args(args)
return args
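# --- Hedged usage note (not part of the original module) ---
# A training entry point would typically call parse_train_arguments() and read
# the validated fields from the returned namespace, e.g.:
#   args = parse_train_arguments()
#   print(args.config_file, args.device, args.num_workers)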
| [
"torch.cuda.is_available"
] | 1.3.1 | miguelvr/ClassyVision | 38a59270e16fda83e160c5888b96c777cb78757b |
1.11 | import torch
from torch import nn
__author__ = "Daniel-Tobias Rademaker"
####################################################
# Single GNN-layer #
####################################################
class GNN_layer(nn.Module):
def __init__( # pylint: disable=too-many-arguments
self,
nmb_edge_projection,
nmb_hidden_attr,
nmb_output_features,
message_vector_length,
nmb_mlp_neurons,
act_fn=nn.SiLU(),
is_last_layer=True,
):
super().__init__()
# The MLP that takes in atom-pairs and creates the Mij's
self.edge_mlp = nn.Sequential(
nn.Linear(nmb_edge_projection + nmb_hidden_attr * 2, nmb_mlp_neurons),
act_fn,
nn.Linear(nmb_mlp_neurons, message_vector_length),
act_fn,
)
# The node-MLP, creates a new node-representation given the Mi's
self.node_mlp = nn.Sequential(
nn.BatchNorm1d(message_vector_length + nmb_hidden_attr),
nn.Linear(message_vector_length + nmb_hidden_attr, nmb_mlp_neurons),
act_fn,
nn.Linear(nmb_mlp_neurons, nmb_mlp_neurons),
act_fn,
nn.Linear(nmb_mlp_neurons, nmb_hidden_attr),
)
        # Only the last layer has attention and output modules
if is_last_layer:
            # attention mlp, to weight the output significance
self.attention_mlp = nn.Sequential(
nn.Linear(nmb_hidden_attr, nmb_mlp_neurons),
act_fn,
nn.Linear(nmb_mlp_neurons, 1),
nn.Sigmoid(),
)
# Create the output vector per node we are interested in
self.output_mlp = nn.Sequential(
nn.Linear(nmb_hidden_attr, nmb_mlp_neurons),
act_fn,
nn.Linear(nmb_mlp_neurons, nmb_output_features),
)
    # MLP that takes in the node attributes of the source and target nodes together
    # with the edge attributes in order to create a 'message vector' between those
    # nodes
def edge_model(self, edge_attr, hidden_features_source, hidden_features_target):
cat = torch.cat(
[edge_attr, hidden_features_source, hidden_features_target], dim=1
)
output = self.edge_mlp(cat)
return output
    # A function that updates the node attributes. It is assumed that the
    # sub-messages have already been summed
def node_model(self, summed_edge_message, hidden_features):
cat = torch.cat([summed_edge_message, hidden_features], dim=1)
output = self.node_mlp(cat)
return hidden_features + output
    # Sums the individual sub-messages (multiple per node) into a single message
    # vector per node
def sum_messages(self, edges, messages, nmb_nodes):
row, _ = edges
summed_messages_shape = (nmb_nodes, messages.size(1))
result = messages.new_full(summed_messages_shape, 0)
row = row.unsqueeze(-1).expand(-1, messages.size(1))
result.scatter_add_(0, row, messages)
return result
# Runs the GNN
    # steps is the number of times it exchanges info with neighbors
def update_nodes(self, edges, edge_attr, hidden_features, steps=1):
(
row,
col,
) = edges # a single edge is defined as the index of atom1 and the index of atom2
h = hidden_features # shortening the variable name
        # It is possible to run the input through the same layer multiple
        # times
for _ in range(steps):
node_pair_messages = self.edge_model(
edge_attr, h[row], h[col]
) # get all atom-pair messages
# sum all messages per node to single message vector
messages = self.sum_messages(edges, node_pair_messages, len(h))
# Use the messages to update the node-attributes
h = self.node_model(messages, h)
return h
    # output, every node creates a prediction + an estimate of how sure it is of
    # its prediction. Only done by the last 'GNN layer'
def output(self, hidden_features, get_attention=True):
output = self.output_mlp(hidden_features)
if get_attention:
return output, self.attention_mlp(hidden_features)
return output
################################################################
# GNN super class #
################################################################
class SuperGNN(nn.Module):
def __init__( # pylint: disable=too-many-arguments
self,
nmb_edge_attr,
nmb_node_attr,
nmb_hidden_attr,
nmb_mlp_neurons,
nmb_edge_projection,
nmb_gnn_layers,
nmb_output_features,
message_vector_length,
act_fn=nn.SiLU(),
):
super().__init__()
        # Since edge attributes go into every layer, it might be better to learn
        # a smarter representation of them first
self.preproc_edge_mlp = nn.Sequential(
nn.BatchNorm1d(nmb_edge_attr),
nn.Linear(nmb_edge_attr, nmb_mlp_neurons),
nn.BatchNorm1d(nmb_mlp_neurons),
act_fn,
nn.Linear(nmb_mlp_neurons, nmb_edge_projection),
act_fn,
)
# Project the node_attributes to the same size as the hidden vector
self.preproc_node_mlp = nn.Sequential(
nn.BatchNorm1d(nmb_node_attr),
nn.Linear(nmb_node_attr, nmb_mlp_neurons),
nn.BatchNorm1d(nmb_mlp_neurons),
act_fn,
nn.Linear(nmb_mlp_neurons, nmb_hidden_attr),
act_fn,
)
self.modlist = nn.ModuleList(
[
GNN_layer(
nmb_edge_projection,
nmb_hidden_attr,
nmb_output_features,
message_vector_length,
nmb_mlp_neurons,
is_last_layer=(gnn_layer == (nmb_gnn_layers - 1)),
)
for gnn_layer in range(nmb_gnn_layers)
]
)
# always use this function before running the GNN layers
def preprocess(self, edge_attr, node_attr):
edge_attr = self.preproc_edge_mlp(edge_attr)
hidden_features = self.preproc_node_mlp(node_attr)
return edge_attr, hidden_features
    # Runs data through the layers and returns the output. Optionally, the
    # attention weights can also be returned
def runThroughNetwork(
self, edges, edge_attr, node_attr, with_output_attention=False
):
edge_attr, node_attr = self.preprocess(edge_attr, node_attr)
for layer in self.modlist:
node_attr = layer.update_nodes(edges, edge_attr, node_attr)
if with_output_attention:
representations, attention = self.modlist[-1].output(node_attr, True)
return representations, attention
        representations = self.modlist[-1].output(node_attr, False)
return representations
######################
# The alignment GNN #
######################
class Alignment_GNN(SuperGNN):
def __init__( # pylint: disable=too-many-arguments
self,
nmb_edge_attr,
nmb_node_attr,
nmb_output_features,
nmb_hidden_attr,
message_vector_length,
nmb_mlp_neurons,
nmb_gnn_layers,
nmb_edge_projection,
act_fn=nn.SiLU(),
):
super().__init__(
nmb_edge_attr,
nmb_node_attr,
nmb_hidden_attr,
nmb_mlp_neurons,
nmb_edge_projection,
nmb_gnn_layers,
nmb_output_features,
message_vector_length,
act_fn,
)
    # Run over all layers, and return the output vectors
def forward(self, edges, edge_attr, node_attr):
representations = self.runThroughNetwork(edges, edge_attr, node_attr)
return representations
if __name__ == "__main__":
#####################################
# Example of initializing a gnn #
#####################################
##############################
# CONSTANTS #
##############################
MESSAGE_VECTOR_LENGTH = 32
NMB_HIDDED_ATTRIBUTES = 32
NMB_EDGE_PROJECTION = 32
NMB_OUPUT_FEATURES = 32
NMB_MLP_NEURONS = 32
NMB_GNN_LAYERS = 5
NODE_RADIUS = 15
##############################
NMB_EDGE_ATTIBUTES = 4
NMB_NODE_ATTRIBUTES = 3 + 21
gnn = Alignment_GNN(
nmb_edge_attr=NMB_EDGE_ATTIBUTES,
nmb_node_attr=NMB_NODE_ATTRIBUTES,
nmb_output_features=NMB_OUPUT_FEATURES,
nmb_gnn_layers=NMB_GNN_LAYERS,
message_vector_length=MESSAGE_VECTOR_LENGTH,
nmb_mlp_neurons=NMB_MLP_NEURONS,
nmb_hidden_attr=NMB_HIDDED_ATTRIBUTES,
nmb_edge_projection=NMB_EDGE_PROJECTION,
)
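    # --- Hedged usage sketch (not part of the original module) ---
    # Minimal forward pass on random data, assuming the constants above.
    nmb_nodes, nmb_edges = 10, 30
    edges = (
        torch.randint(0, nmb_nodes, (nmb_edges,)),
        torch.randint(0, nmb_nodes, (nmb_edges,)),
    )
    edge_attr = torch.randn(nmb_edges, NMB_EDGE_ATTIBUTES)
    node_attr = torch.randn(nmb_nodes, NMB_NODE_ATTRIBUTES)
    gnn.eval()  # BatchNorm layers use running statistics in eval mode
    with torch.no_grad():
        representations, attention = gnn.runThroughNetwork(
            edges, edge_attr, node_attr, with_output_attention=True
        )
    print(representations.shape)  # expected: (nmb_nodes, NMB_OUPUT_FEATURES)
    print(attention.shape)  # expected: (nmb_nodes, 1)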
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.Sigmoid",
"torch.nn.SiLU",
"torch.nn.BatchNorm1d"
] | 1.11.0 | DeepRank/deeprank-gnn-2 | 9d1b5f254ae25364bec88ba6e82a6aa1022fc699 |
1.8 | # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa)."""
import argparse
import glob
import json
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch.utils.data import RandomSampler
from torch.utils.data import SequentialSampler
from torch.utils.data import TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm
from tqdm import trange
from transformers import AdamW
from transformers import AutoConfig
from transformers import AutoModelForSequenceClassification
from transformers import AutoTokenizer
from transformers import get_linear_schedule_with_warmup
from transformers import glue_compute_metrics as compute_metrics
from transformers import (
glue_convert_examples_to_features as convert_examples_to_features,
)
from transformers import glue_output_modes as output_modes
from transformers import glue_processors as processors
from transformers import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
from transformers import WEIGHTS_NAME
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter # type: ignore
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
ALL_MODELS = sum(
(tuple(conf.pretrained_config_archive_map.keys()) for conf in MODEL_CONFIG_CLASSES),
(),
)
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def train(args, train_dataset, model, tokenizer):
"""Train the model"""
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter(
log_dir=os.getenv("TENSORBOARD_LOG_PATH", "/tensorboard_logs/")
)
if args.local_rank == -1:
rank, world_size = 0, 1
else:
rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = (
RandomSampler(train_dataset)
if args.local_rank == -1
else DistributedSampler(train_dataset, rank=rank, num_replicas=world_size)
)
train_dataloader = DataLoader(
train_dataset, sampler=train_sampler, batch_size=args.train_batch_size
)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = (
args.max_steps
// (len(train_dataloader) // args.gradient_accumulation_steps)
+ 1
)
else:
t_total = (
len(train_dataloader)
// args.gradient_accumulation_steps
* args.num_train_epochs
)
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in model.named_parameters()
if not any(nd in n for nd in no_decay)
],
"weight_decay": args.weight_decay,
},
{
"params": [
p
for n, p in model.named_parameters()
if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
optimizer = AdamW(
optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon
)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
# Check if saved optimizer or scheduler states exist
if os.path.isfile(
os.path.join(args.model_name_or_path, "optimizer.pt")
) and os.path.isfile(os.path.join(args.model_name_or_path, "scheduler.pt")):
# Load in optimizer and scheduler states
optimizer.load_state_dict(
torch.load(os.path.join(args.model_name_or_path, "optimizer.pt"))
)
scheduler.load_state_dict(
torch.load(os.path.join(args.model_name_or_path, "scheduler.pt"))
)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use fp16 training."
)
model, optimizer = amp.initialize(
model, optimizer, opt_level=args.fp16_opt_level
)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True,
)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(
" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size
)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if os.path.exists(args.model_name_or_path):
# set global_step to global_step of last saved checkpoint from model path
try:
global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0])
except ValueError:
global_step = 0
epochs_trained = global_step // (
len(train_dataloader) // args.gradient_accumulation_steps
)
steps_trained_in_current_epoch = global_step % (
len(train_dataloader) // args.gradient_accumulation_steps
)
logger.info(
" Continuing training from checkpoint, will skip to saved global_step"
)
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(
" Will skip the first %d steps in the first epoch",
steps_trained_in_current_epoch,
)
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(
epochs_trained,
int(args.num_train_epochs),
desc="Epoch",
disable=args.local_rank not in [-1, 0],
)
    set_seed(args)  # Added here for reproducibility
for _ in train_iterator:
epoch_iterator = tqdm(
train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]
)
for step, batch in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
model.train()
batch = tuple(t.to(args.device) for t in batch)
inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
"labels": batch[3],
}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
outputs = model(**inputs)
loss = outputs[
0
] # model outputs are always tuple in transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0 or (
# last step in epoch but step is always smaller than gradient_accumulation_steps
len(epoch_iterator) <= args.gradient_accumulation_steps
and (step + 1) == len(epoch_iterator)
):
if args.fp16:
torch.nn.utils.clip_grad_norm_(
amp.master_params(optimizer), args.max_grad_norm
)
else:
torch.nn.utils.clip_grad_norm_(
model.parameters(), args.max_grad_norm
)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if (
args.local_rank in [-1, 0]
and args.logging_steps > 0
and global_step % args.logging_steps == 0
):
logs = {}
if (
args.local_rank == -1 and args.evaluate_during_training
): # Only evaluate when single GPU otherwise metrics may not average well
results = evaluate(args, model, tokenizer)
for key, value in results.items():
eval_key = f"eval_{key}"
logs[eval_key] = value
loss_scalar = (tr_loss - logging_loss) / args.logging_steps
learning_rate_scalar = scheduler.get_lr()[0]
logs["learning_rate"] = learning_rate_scalar
logs["loss"] = loss_scalar
logging_loss = tr_loss
for key, value in logs.items():
tb_writer.add_scalar(key, value, global_step)
print(json.dumps({**logs, **{"step": global_step}}))
if (
args.local_rank in [-1, 0]
and args.save_steps > 0
and global_step % args.save_steps == 0
):
# Save model checkpoint
output_dir = os.path.join(
args.output_dir, f"checkpoint-{global_step}"
)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
torch.save(
optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")
)
torch.save(
scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")
)
logger.info(
"Saving optimizer and scheduler states to %s", output_dir
)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
def evaluate(args, model, tokenizer, prefix=""):
# Loop to handle MNLI double evaluation (matched, mis-matched)
eval_task_names = (
("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
)
eval_outputs_dirs = (
(args.output_dir, args.output_dir + "-MM")
if args.task_name == "mnli"
else (args.output_dir,)
)
results = {}
for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
eval_dataset = load_and_cache_examples(
args, eval_task, tokenizer, evaluate=True
)
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(
eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size
)
# multi-gpu eval
if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
model = torch.nn.DataParallel(model)
# Eval!
logger.info(f"***** Running evaluation {prefix} *****")
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
"labels": batch[3],
}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2]
if args.model_type in ["bert", "xlnet", "albert"]
else None
) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(
out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0
)
eval_loss = eval_loss / nb_eval_steps
if args.output_mode == "classification":
preds = np.argmax(preds, axis=1)
elif args.output_mode == "regression":
preds = np.squeeze(preds)
result = compute_metrics(eval_task, preds, out_label_ids)
results.update(result)
output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info(f"***** Eval results {prefix} *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write(f"{key} = {str(result[key])}\n")
return results
def load_and_cache_examples(args, task, tokenizer, evaluate=False):
if args.local_rank not in [-1, 0] and not evaluate:
        torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset, and the others will use the cache
processor = processors[task]()
output_mode = output_modes[task]
# Load data features from cache or dataset file
cached_features_file = os.path.join(
args.data_dir,
"cached_{}_{}_{}_{}".format(
"dev" if evaluate else "train",
list(filter(None, args.model_name_or_path.split("/"))).pop(),
str(args.max_seq_length),
str(task),
),
)
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", args.data_dir)
label_list = processor.get_labels()
if task in ["mnli", "mnli-mm"] and args.model_type in ["roberta", "xlmroberta"]:
# HACK(label indices are swapped in RoBERTa pretrained model)
label_list[1], label_list[2] = label_list[2], label_list[1]
examples = (
processor.get_dev_examples(args.data_dir)
if evaluate
else processor.get_train_examples(args.data_dir)
)
features = convert_examples_to_features(
examples,
tokenizer,
label_list=label_list,
max_length=args.max_seq_length,
output_mode=output_mode,
pad_on_left=bool(args.model_type in ["xlnet"]), # pad on the left for xlnet
pad_token=tokenizer.pad_token_id,
pad_token_segment_id=tokenizer.pad_token_type_id,
)
if args.local_rank in [-1, 0]:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
if args.local_rank == 0 and not evaluate:
        torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset, and the others will use the cache
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor(
[f.attention_mask for f in features], dtype=torch.long
)
all_token_type_ids = torch.tensor(
[f.token_type_ids for f in features], dtype=torch.long
)
if output_mode == "classification":
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
elif output_mode == "regression":
all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
dataset = TensorDataset(
all_input_ids, all_attention_mask, all_token_type_ids, all_labels
)
return dataset
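# --- Hedged usage note (not part of the original script) ---
# A typical single-GPU fine-tuning run on MRPC might look like the following;
# the script name and paths are hypothetical and depend on your setup:
#   python run_glue.py --model_type bert --model_name_or_path bert-base-uncased \
#       --task_name MRPC --do_train --do_eval --data_dir /path/to/glue/MRPC \
#       --max_seq_length 128 --per_gpu_train_batch_size 32 --learning_rate 2e-5 \
#       --num_train_epochs 3.0 --output_dir /tmp/mrpc_output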
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
)
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list: " + ", ".join(MODEL_TYPES),
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name selected in the list: "
+ ", ".join(ALL_MODELS),
)
parser.add_argument(
"--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train selected in the list: "
+ ", ".join(processors.keys()),
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
# Other parameters
parser.add_argument(
"--config_name",
default="",
type=str,
help="Pretrained config name or path if not the same as model_name",
)
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3",
)
parser.add_argument(
"--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--do_train", action="store_true", help="Whether to run training."
)
parser.add_argument(
"--do_eval", action="store_true", help="Whether to run eval on the dev set."
)
parser.add_argument(
"--evaluate_during_training",
action="store_true",
help="Run evaluation during training at each logging step.",
)
parser.add_argument(
"--do_lower_case",
action="store_true",
help="Set this flag if you are using an uncased model.",
)
parser.add_argument(
"--per_gpu_train_batch_size",
default=8,
type=int,
help="Batch size per GPU/CPU for training.",
)
parser.add_argument(
"--per_gpu_eval_batch_size",
default=8,
type=int,
help="Batch size per GPU/CPU for evaluation.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.",
)
parser.add_argument(
"--weight_decay", default=0.0, type=float, help="Weight decay if we apply some."
)
parser.add_argument(
"--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer."
)
parser.add_argument(
"--max_grad_norm", default=1.0, type=float, help="Max gradient norm."
)
parser.add_argument(
"--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.",
)
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
)
parser.add_argument(
"--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps."
)
parser.add_argument(
"--logging_steps", type=int, default=500, help="Log every X updates steps."
)
parser.add_argument(
"--save_steps",
type=int,
default=500,
help="Save checkpoint every X updates steps.",
)
parser.add_argument(
"--eval_all_checkpoints",
action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number",
)
parser.add_argument(
"--no_cuda", action="store_true", help="Avoid using CUDA when available"
)
parser.add_argument(
"--overwrite_output_dir",
action="store_true",
help="Overwrite the content of the output directory",
)
parser.add_argument(
"--overwrite_cache",
action="store_true",
help="Overwrite the cached training and evaluation sets",
)
parser.add_argument(
"--seed", type=int, default=42, help="random seed for initialization"
)
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
parser.add_argument(
"--local_rank",
type=int,
default=-1,
help="For distributed training: local_rank",
)
parser.add_argument(
"--server_ip", type=str, default="", help="For distant debugging."
)
parser.add_argument(
"--server_port", type=str, default="", help="For distant debugging."
)
args = parser.parse_args()
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.overwrite_output_dir
):
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir
)
)
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(
address=(args.server_ip, args.server_port), redirect_output=True
)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device(
"cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu"
)
args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# Set seed
set_seed(args)
# Prepare GLUE task
args.task_name = args.task_name.lower()
if args.task_name not in processors:
raise ValueError("Task not found: %s" % (args.task_name))
processor = processors[args.task_name]()
args.output_mode = output_modes[args.task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config = AutoConfig.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
finetuning_task=args.task_name,
cache_dir=args.cache_dir if args.cache_dir else None,
)
tokenizer = AutoTokenizer.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None,
)
model = AutoModelForSequenceClassification.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None,
)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
train_dataset = load_and_cache_examples(
args, args.task_name, tokenizer, evaluate=False
)
global_step, tr_loss = train(args, train_dataset, model, tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
# Load a trained model and vocabulary that you have fine-tuned
model = AutoModelForSequenceClassification.from_pretrained(args.output_dir)
tokenizer = AutoTokenizer.from_pretrained(args.output_dir)
model.to(args.device)
# Evaluation
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
tokenizer = AutoTokenizer.from_pretrained(
args.output_dir, do_lower_case=args.do_lower_case
)
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list(
os.path.dirname(c)
for c in sorted(
glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)
)
)
logging.getLogger("transformers.modeling_utils").setLevel(
logging.WARN
) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
prefix = (
checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint)
model.to(args.device)
result = evaluate(args, model, tokenizer, prefix=prefix)
result = {k + f"_{global_step}": v for k, v in result.items()}
results.update(result)
return results
if __name__ == "__main__":
main()
| [
"torch.distributed.get_world_size",
"torch.utils.data.RandomSampler",
"torch.cuda.is_available",
"torch.load",
"torch.nn.DataParallel",
"torch.distributed.init_process_group",
"torch.manual_seed",
"torch.tensor",
"torch.utils.data.DataLoader",
"torch.distributed.get_rank",
"torch.device",
"torch.cuda.manual_seed_all",
"torch.save",
"torch.utils.data.SequentialSampler",
"torch.nn.parallel.DistributedDataParallel",
"torch.cuda.device_count",
"torch.cuda.set_device",
"torch.distributed.barrier",
"torch.utils.data.TensorDataset",
"torch.no_grad",
"torch.utils.data.distributed.DistributedSampler"
] | 1.8.0 | Berumotto1/ml-platform-sdk-python | fc30300552bbeed5d97e8846beb040c9d262d23e |
1.0 | import torch
import torch.nn as nn
import torch.nn.functional as F
class ConditionedCNNClassifier(nn.Module):
def __init__(self, net_cfg, embed_cfg):
super().__init__()
self.net_cfg = net_cfg
self.embed_cfg = embed_cfg
print('----------- Model Config---------------')
print(f'Headline Embedding Size: {self.embed_cfg["H_V"]}')
print(f'Body Embedding Size: {self.embed_cfg["B_V"]}')
print(f'Number of Classes: {self.net_cfg["num_classes"]}')
print('---------------------------------------')
self.h_embedding = nn.Embedding(self.embed_cfg['H_V'], self.embed_cfg['D'])
self.b_embedding = nn.Embedding(self.embed_cfg['B_V'], self.embed_cfg['D'])
self.convs_headline = nn.ModuleList(
[self.n_gram_conv(n, self.net_cfg['h_num_filt'])
for n in self.net_cfg['h_n_list']])
self.convs_body = nn.ModuleList(
[self.n_gram_conv(n, self.net_cfg['b_num_filt'])
for n in self.net_cfg['b_n_list']])
self.fc_out = nn.Sequential(
nn.Linear(
(len(self.net_cfg['b_n_list']) * self.net_cfg['b_num_filt']) +
(len(self.net_cfg['h_n_list']) * self.net_cfg['h_num_filt']), 1024),
nn.ReLU(),
nn.Dropout(self.net_cfg['dropout_rate']),
nn.Linear(1024, 256),
nn.ReLU(),
nn.Dropout(self.net_cfg['dropout_rate']),
nn.Linear(256, self.net_cfg['num_classes'])
)
def n_gram_conv(self, n, num_filt):
return nn.Sequential(
nn.Conv2d(
in_channels = 1,
out_channels = num_filt,
kernel_size = (n, self.embed_cfg['D'])),
nn.ReLU())
def forward(self, h, b):
# h = (Batch, Sentence Words)
# b = (Batch, Sentence Words)
h = self.h_embedding(h) # (Batch, Word, Vector)
b = self.b_embedding(b) # (Batch, Word, Vector)
h = h.unsqueeze(1) # (Batch, 1, Word, Vector)
b = b.unsqueeze(1) # (Batch, 1, Word, Vector)
# (Batch, Num_Filters, Num_Feature_Map, 1) * len(h_n_list)
h_convs_out = [conv(h) for conv in self.convs_headline]
# (Batch, Num_Filters, Num_Feature_Map, 1) * len(b_n_list)
b_convs_out = [conv(b) for conv in self.convs_body]
# (Batch, Num_Filters, Num_Feature_Map) * len(h_n_list)
h_convs_out = [output.squeeze(3) for output in h_convs_out]
# (Batch, Num_Filters, Num_Feature_Map) * len(b_n_list)
b_convs_out = [output.squeeze(3) for output in b_convs_out]
# (Batch, Num_Filters, 1) * len(h_n_list)
# MaxPool1D: 2nd arg is kernel size
# the stride is taken to be equal to kernel size by default
h_convs_out = [F.max_pool1d(h_conv_out, h_conv_out.shape[2])
for h_conv_out in h_convs_out]
# (Batch, Num_Filters, 1) * len(b_n_list)
b_convs_out = [F.max_pool1d(b_conv_out, b_conv_out.shape[2])
for b_conv_out in b_convs_out]
# (Batch, Num_Filters) * len(h_n_list)
h_convs_out = [h_conv_out.squeeze(2) for h_conv_out in h_convs_out]
        # (Batch, Num_Filters) * len(b_n_list)
b_convs_out = [b_conv_out.squeeze(2) for b_conv_out in b_convs_out]
# (Batch, Num_Filters * len(h_n_list))
h_feature_vec = torch.cat(h_convs_out, dim = 1)
b_feature_vec = torch.cat(b_convs_out, dim = 1)
h_b_ft = torch.cat([h_feature_vec, b_feature_vec], dim = 1)
logits = self.fc_out(h_b_ft)
return logits, h_feature_vec, b_feature_vec
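# Illustrative usage sketch (added for clarity, not part of the original file);
# the config keys mirror those read in __init__/n_gram_conv, but the concrete
# values below are assumptions chosen only for demonstration:
#   embed_cfg = {'H_V': 5000, 'B_V': 20000, 'D': 100}
#   net_cfg = {'num_classes': 4, 'dropout_rate': 0.5,
#              'h_num_filt': 64, 'h_n_list': [2, 3],
#              'b_num_filt': 64, 'b_n_list': [3, 4, 5]}
#   model = ConditionedCNNClassifier(net_cfg, embed_cfg)
#   logits, h_vec, b_vec = model(torch.randint(0, 5000, (8, 20)),
#                                torch.randint(0, 20000, (8, 200)))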
class ConditionedSharedCNNClassifier(nn.Module):
def __init__(self, net_cfg, embed_cfg):
super().__init__()
self.net_cfg = net_cfg
self.embed_cfg = embed_cfg
print('----------- Model Config---------------')
print(f'Headline Embedding Size: {self.embed_cfg["H_V"]}')
print(f'Body Embedding Size: {self.embed_cfg["B_V"]}')
print(f'Number of Classes: {self.net_cfg["num_classes"]}')
print('---------------------------------------')
self.h_embedding = nn.Embedding(self.embed_cfg['H_V'], self.embed_cfg['D'])
self.b_embedding = nn.Embedding(self.embed_cfg['B_V'], self.embed_cfg['D'])
self.shared_convs = nn.ModuleList(
[self.n_gram_conv(n, self.net_cfg['num_filt'])
for n in self.net_cfg['n_list']])
self.fc_out = nn.Sequential(
nn.Linear(
(2 * len(self.net_cfg['n_list']) * self.net_cfg['num_filt']), 1024),
nn.ReLU(),
nn.Dropout(self.net_cfg['dropout_rate']),
nn.Linear(1024, 256),
nn.ReLU(),
nn.Dropout(self.net_cfg['dropout_rate']),
nn.Linear(256, self.net_cfg['num_classes'])
)
def n_gram_conv(self, n, num_filt):
return nn.Sequential(
nn.Conv2d(
in_channels = 1,
out_channels = num_filt,
kernel_size = (n, self.embed_cfg['D'])),
nn.ReLU())
def forward(self, h, b):
# h = (Batch, Sentence Words)
# b = (Batch, Sentence Words)
h = self.h_embedding(h) # (Batch, Word, Vector)
b = self.b_embedding(b) # (Batch, Word, Vector)
h = h.unsqueeze(1) # (Batch, 1, Word, Vector)
b = b.unsqueeze(1) # (Batch, 1, Word, Vector)
# (Batch, Num_Filters, Num_Feature_Map, 1) * len(n_list)
h_convs_out = [conv(h) for conv in self.shared_convs]
# (Batch, Num_Filters, Num_Feature_Map, 1) * len(n_list)
b_convs_out = [conv(b) for conv in self.shared_convs]
# (Batch, Num_Filters, Num_Feature_Map) * len(n_list)
h_convs_out = [output.squeeze(3) for output in h_convs_out]
# (Batch, Num_Filters, Num_Feature_Map) * len(n_list)
b_convs_out = [output.squeeze(3) for output in b_convs_out]
# (Batch, Num_Filters, 1) * len(n_list)
# MaxPool1D: 2nd arg is kernel size
# the stride is taken to be equal to kernel size by default
h_convs_out = [F.max_pool1d(h_conv_out, h_conv_out.shape[2])
for h_conv_out in h_convs_out]
# (Batch, Num_Filters, 1) * len(n_list)
b_convs_out = [F.max_pool1d(b_conv_out, b_conv_out.shape[2])
for b_conv_out in b_convs_out]
# (Batch, Num_Filters) * len(n_list)
h_convs_out = [h_conv_out.squeeze(2) for h_conv_out in h_convs_out]
# (Batch, Num_Filters) * len(n_list)
b_convs_out = [b_conv_out.squeeze(2) for b_conv_out in b_convs_out]
        # (Batch, Num_Filters * len(n_list))
h_feature_vec = torch.cat(h_convs_out, dim = 1)
b_feature_vec = torch.cat(b_convs_out, dim = 1)
h_b_ft = torch.cat([h_feature_vec, b_feature_vec], dim = 1)
logits = self.fc_out(h_b_ft)
return logits, h_feature_vec, b_feature_vec
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.Dropout",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.functional.max_pool1d",
"torch.nn.Embedding"
] | 1.0 | varshanth/FakeNewsChallenge-FNC1 | 57cc26c62f73953bf49a2be7e35426c28c055991 |
1.10 | import argparse
import random
import sys
from pathlib import Path
from typing import Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from mnist_networks import medium_cnn
from model_manifold.data_matrix import batch_data_matrix_trace_rank
from model_manifold.plot import save_ranks, save_mean_trace
def train_epoch(
model: nn.Module, loader: DataLoader, optimizer: Optimizer, epoch: int
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
log_interval = len(loader) // 10
device = next(model.parameters()).device
model.train()
steps = []
ranks = []
traces = []
reference_batch = exemplar_batch(1000, train=True).to(device)
for batch_idx, (data, target) in enumerate(loader, start=1):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
if batch_idx % log_interval == 0:
print(
"Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
epoch,
batch_idx * len(data),
                    len(loader.dataset),
                    100.0 * batch_idx / len(loader),
loss.item(),
)
)
steps.append(batch_idx)
batch_traces, batch_ranks = batch_data_matrix_trace_rank(
model, reference_batch
)
traces.append(batch_traces)
ranks.append(batch_ranks)
optimizer.step()
steps = torch.tensor(steps)
ranks = torch.stack(ranks, dim=1)
traces = torch.stack(traces, dim=1)
return steps, ranks, traces
def test(model: nn.Module, loader: DataLoader) -> float:
device = next(model.parameters()).device
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction="sum").item()
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(loader.dataset)
print(
"\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n".format(
test_loss,
correct,
len(loader.dataset),
100.0 * correct / len(loader.dataset),
)
)
return test_loss
def mnist_loader(batch_size: int, train: bool) -> DataLoader:
loader = torch.utils.data.DataLoader(
datasets.MNIST(
"data",
train=train,
download=True,
transform=transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
),
),
batch_size=batch_size,
shuffle=train,
num_workers=1,
pin_memory=True,
)
return loader
def exemplar_batch(batch_size: int, train: bool) -> torch.Tensor:
dataset = datasets.MNIST(
"data",
train=train,
download=True,
transform=transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
),
)
examples = []
for i in range(batch_size):
examples.append(dataset[i][0])
batch = torch.stack(examples, dim=0)
return batch
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Train a basic model on MNIST",
usage="python3 mnist_training.py [--batch-size BATCH-SIZE "
"--epochs EPOCHS --lr LR --seed SEED --output-dir OUTPUT-DIR]",
)
parser.add_argument("--batch-size", type=int, default=60, help="Batch size")
parser.add_argument("--epochs", type=int, default=30, help="Number of epochs")
parser.add_argument("--lr", type=float, default=0.01, help="Learning rate")
parser.add_argument("--seed", type=int, default=42, help="Random seed")
parser.add_argument(
"--output-dir",
type=str,
default="checkpoint",
help="Model checkpoint output directory",
)
args = parser.parse_args(sys.argv[1:])
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
output_dir = Path(args.output_dir).expanduser()
output_dir.mkdir(parents=True, exist_ok=True)
model = medium_cnn()
optimizer = optim.SGD(model.parameters(), lr=args.lr)
train_loader = mnist_loader(args.batch_size, train=True)
test_loader = mnist_loader(args.batch_size, train=False)
global_steps = []
global_ranks = []
global_traces = []
for epoch in range(args.epochs):
epoch_steps, epoch_ranks, epoch_traces = train_epoch(
model, train_loader, optimizer, epoch + 1
)
global_steps.append(epoch_steps + epoch * len(train_loader))
global_ranks.append(epoch_ranks)
global_traces.append(epoch_traces)
test(model, test_loader)
torch.save(model.state_dict(), output_dir / f"medium_cnn_{epoch + 1:02d}.pt")
global_steps = torch.cat(global_steps, dim=0)
global_ranks = torch.cat(global_ranks, dim=1)
global_traces = torch.cat(global_traces, dim=1)
save_mean_trace(
global_steps,
global_traces,
output_dir / "traces_medium_cnn.pdf",
)
save_ranks(
global_steps,
global_ranks,
output_dir / "ranks_medium_cnn.pdf",
)
| [
"torch.cat",
"torch.stack",
"torch.no_grad",
"torch.manual_seed",
"torch.tensor",
"torch.nn.functional.nll_loss"
] | 1.10.1 | lucagrementieri/model-manifold | 41042a4c4f7cf702ca6c57fdd20d698c7ed46a52 |
1.6 | import argparse
import logging
import os
import sys
import socket
import json
import pickle
import torch
from datetime import datetime
from transformers import AutoConfig, AutoTokenizer, AutoModel
from torch.utils.data import DataLoader, RandomSampler
from .framework import RerankerFramework
from ..datasets import (EfficientQARerankerDatasetForBaselineReranker_TRAIN,
EfficientQARerankerDatasetForBaselineReranker,
BaselineRerankerQueryBuilder)
from ..models import BaselineReranker
from ...common.utility.utility import setup_logging
LOGGER = logging.getLogger(__name__)
def build_parser():
parser = argparse.ArgumentParser(description='Passages Reranker training process.')
parser.add_argument("--config", default=None, help="")
parser.add_argument("--train", default="./data/train_wiki.jsonl", help="train dataset")
parser.add_argument("--val", default="./data/val_wiki.jsonl", help="validation dataset")
parser.add_argument("--database", default="./data/wiki.db", help="database with full passages")
parser.add_argument("--hard_negatives", default=None, help="")
parser.add_argument("--encoder", default="roberta-base", help="name or path to encoder")
parser.add_argument("--cache_dir", default=None, help="cache directory")
parser.add_argument("--max_length", default=512, type=int, help="maximum length of the input sequence")
parser.add_argument("--checkpoint_dir", default=".checkpoints", help="directory to saving checkpoints")
parser.add_argument("--no_gpu", action="store_true", help="no use GPU")
parser.add_argument("--train_batch_size", default=20, type=int, help="mini-batch size")
parser.add_argument("--eval_batch_size", default=100, type=int, help="mini-batch size")
parser.add_argument("--iter_size", default=8, type=int, help="accumulated gradient")
parser.add_argument("--num_epoch", default=5, type=int, help="number of epochs")
parser.add_argument("--lr", default=1, type=int, help="learning rate")
parser.add_argument("--fp16", action="store_true", help="train with fp16")
parser.add_argument("--criterion", default=None, help="loss function (CE/BCE)")
return parser
def binary_cross_entropy():
def inner(logits, target):
logits = logits.squeeze(0)
batch_size = logits.shape[0]
one_hots = torch.zeros(batch_size, device=target.get_device())
one_hots[target] = 1.
return criterion(logits, one_hots)
criterion = torch.nn.BCEWithLogitsLoss(reduction="sum")
return inner
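# Illustrative sketch (added for clarity, not part of the original file): with a
# single gold-passage index per query, the closure builds a one-hot target over
# the passage scores before applying BCE-with-logits, e.g.
#   scorer = binary_cross_entropy()
#   logits = torch.randn(1, 100, device="cuda")  # scores for 100 candidate passages
#   target = torch.tensor(3, device="cuda")      # index of the gold passage
#   loss = scorer(logits, target)
# Note that target.get_device() assumes the target tensor lives on a GPU.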
def get_dataloader_for_baseline_reranker(dataset, random_sampler=False):
if random_sampler:
sampler = RandomSampler(dataset)
dataloader = DataLoader(
dataset,
sampler=sampler,
collate_fn=lambda batch: batch[0]
)
else:
dataloader = DataLoader(
dataset,
collate_fn=lambda batch: batch[0]
)
return dataloader
def train(args):
LOGGER.info("Config: " + json.dumps(args, sort_keys=True, indent=2))
config = AutoConfig.from_pretrained(args["encoder"], cache_dir=args["cache_dir"])
tokenizer = AutoTokenizer.from_pretrained(args["encoder"], cache_dir=args["cache_dir"], use_fast=False)
LOGGER.info("Load datasets.")
if args["hard_negatives"]:
with open(args["hard_negatives"], "rb") as file_:
negatives = pickle.load(file_)
else:
negatives = None
model_config = {
"reranker_model_type": "baseline",
"encoder": args["encoder"],
"encoder_config": config,
"max_length": args["max_length"],
"negatives": negatives != None
}
query_builder = BaselineRerankerQueryBuilder(tokenizer, args["max_length"])
train_dataset = EfficientQARerankerDatasetForBaselineReranker_TRAIN(args["train"], args["database"], tokenizer, query_builder, args["train_batch_size"], negative_samples=negatives, shuffle_predicted_indices=True)
val_dataset = EfficientQARerankerDatasetForBaselineReranker(args["val"], args["database"], query_builder, args["eval_batch_size"])
train_dataloader = get_dataloader_for_baseline_reranker(train_dataset, random_sampler=True)
val_dataloader = get_dataloader_for_baseline_reranker(val_dataset, random_sampler=False)
LOGGER.info("Reranker training configuration: " + json.dumps(args, indent=4, sort_keys=True))
LOGGER.info("Model inicialization.")
LOGGER.info(f"Cuda is available: {torch.cuda.is_available()}")
device = torch.device("cuda:0" if torch.cuda.is_available() and not args["no_gpu"] else "cpu")
framework = RerankerFramework(device, model_config, train_dataloader, val_dataloader)
encoder = AutoModel.from_pretrained(args["encoder"], cache_dir=args["cache_dir"])
model = BaselineReranker(config, encoder)
model = model.to(device)
save_ckpt = None
checkpoint_name = "reranker_"
checkpoint_name+= args["encoder"].split('/')[-1]
checkpoint_name+= "_" + datetime.today().strftime('%Y-%m-%d-%H-%M')
checkpoint_name+= "_" + socket.gethostname()
if args["checkpoint_dir"]:
if not os.path.isdir(args["checkpoint_dir"]):
os.mkdir(args["checkpoint_dir"])
save_ckpt = os.path.join(args["checkpoint_dir"], checkpoint_name)
LOGGER.info("Training started.")
if args["criterion"] == "CE":
LOGGER.info(f"Cross entropy is used.")
criterion = torch.nn.CrossEntropyLoss()
elif args["criterion"] == "BCE":
LOGGER.info(f"Binary cross entropy is used.")
checkpoint_name+= "_" + "BCE-loss"
criterion = binary_cross_entropy()
else:
        LOGGER.warning(f'Unknown \'{args["criterion"]}\' loss function. Default loss function is used.')
criterion = None
framework.train(model,
learning_rate=args["lr"],
batch_size=args["train_batch_size"],
iter_size=args["iter_size"],
num_epoch=args["num_epoch"],
save_ckpt=save_ckpt,
fp16=args["fp16"],
criterion=criterion)
LOGGER.info("Training completed.")
if __name__ == "__main__":
setup_logging(os.path.basename(sys.argv[0]).split(".")[0],
logpath=".logs/",
config_path="configurations/logging.yml")
parser = build_parser()
args = parser.parse_args()
if args.config:
if not os.path.exists(args.config):
            LOGGER.error("Config file not found.")
sys.exit(1)
with open(args.config) as file_:
jsons = json.load(file_)
args.__dict__.update(jsons)
    train(vars(args))
| [
"torch.utils.data.RandomSampler",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.nn.BCEWithLogitsLoss",
"torch.nn.CrossEntropyLoss"
] | 1.6.0 | Ankur3107/scalingQA | f648e34a9e4d7d4dbc2549a3c8767b6a25e3c447 |
1.9 | import torch
import random
import numpy as np
import pandas as pd
def seeding(seed=31):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
torch.set_printoptions(precision=3, sci_mode=False)
pd.set_option("mode.chained_assignment", None)
pd.options.display.float_format = "{:.3f}".format
np.set_printoptions(linewidth=np.inf, precision=3, suppress=True)
def init_device():
"""
    Select the computation device: CUDA ("cuda:0") if available, otherwise CPU.
"""
device = "cuda:0" if torch.cuda.is_available() else "cpu"
return torch.device(device)
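# Illustrative usage sketch (added for clarity, not part of the original file):
#   seeding(31)
#   device = init_device()  # cuda:0 when a GPU is visible, otherwise cpu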
| [
"torch.device",
"torch.cuda.manual_seed",
"torch.manual_seed",
"torch.set_printoptions",
"torch.cuda.is_available"
] | 1.9.1 | strong-win/Timeband | 179ca45ce9cb3efb686d837c8df6cdad4932e59e |
0.2 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import getopt
import sys
import os
import math
import time
import argparse
from visdom import Visdom
sys.path.insert(0, os.path.join('..', '..'))
import torch as T
from torch.autograd import Variable as var
import torch.nn.functional as F
import torch.optim as optim
from torch.nn.utils import clip_grad_norm
from dnc.dnc import DNC
from dnc.sdnc import SDNC
from dnc.sam import SAM
from dnc.util import *
parser = argparse.ArgumentParser(description='PyTorch Differentiable Neural Computer')
parser.add_argument('-input_size', type=int, default=6, help='dimension of input feature')
parser.add_argument('-rnn_type', type=str, default='lstm', help='type of recurrent cells to use for the controller')
parser.add_argument('-nhid', type=int, default=100, help='number of hidden units of the inner nn')
parser.add_argument('-dropout', type=float, default=0, help='controller dropout')
parser.add_argument('-memory_type', type=str, default='dnc', help='dense or sparse memory: dnc | sdnc | sam')
parser.add_argument('-nlayer', type=int, default=1, help='number of layers')
parser.add_argument('-nhlayer', type=int, default=2, help='number of hidden layers')
parser.add_argument('-lr', type=float, default=1e-4, help='initial learning rate')
parser.add_argument('-optim', type=str, default='adam', help='learning rule, supports adam|rmsprop')
parser.add_argument('-clip', type=float, default=50, help='gradient clipping')
parser.add_argument('-batch_size', type=int, default=100, metavar='N', help='batch size')
parser.add_argument('-mem_size', type=int, default=20, help='memory dimension')
parser.add_argument('-mem_slot', type=int, default=16, help='number of memory slots')
parser.add_argument('-read_heads', type=int, default=4, help='number of read heads')
parser.add_argument('-sparse_reads', type=int, default=10, help='number of sparse reads per read head')
parser.add_argument('-temporal_reads', type=int, default=2, help='number of temporal reads')
parser.add_argument('-sequence_max_length', type=int, default=4, metavar='N', help='sequence_max_length')
parser.add_argument('-cuda', type=int, default=-1, help='Cuda GPU ID, -1 for CPU')
parser.add_argument('-iterations', type=int, default=2000, metavar='N', help='total number of iteration')
parser.add_argument('-summarize_freq', type=int, default=100, metavar='N', help='summarize frequency')
parser.add_argument('-check_freq', type=int, default=100, metavar='N', help='check point frequency')
parser.add_argument('-visdom', action='store_true', help='plot memory content on visdom per -summarize_freq steps')
args = parser.parse_args()
print(args)
viz = Visdom()
# assert viz.check_connection()
if args.cuda != -1:
print('Using CUDA.')
T.manual_seed(1111)
else:
print('Using CPU.')
def llprint(message):
sys.stdout.write(message)
sys.stdout.flush()
def onehot(x, n):
ret = np.zeros(n).astype(np.float32)
ret[x] = 1.0
return ret
def generate_data(length, size):
content = np.random.randint(0, size - 1, length)
seqlen = length + 1
x_seq_list = [float('nan')] * seqlen
max_value = 0
max_ind = 0
for i in range(seqlen):
if (i < length):
x_seq_list[i] = onehot(content[i], size)
if (max_value <= content[i]):
max_value = content[i]
max_ind = i
else:
x_seq_list[i] = onehot(size - 1, size)
x_seq_list = np.array(x_seq_list)
x_seq_list = x_seq_list.reshape((1,) + x_seq_list.shape)
x_seq_list = np.reshape(x_seq_list, (1, -1, size))
target_output = np.zeros((1, 1, seqlen), dtype=np.float32)
target_output[:, -1, -1] = max_ind
target_output = np.reshape(target_output, (1, -1, 1))
weights_vec = np.zeros((1, 1, seqlen), dtype=np.float32)
weights_vec[:, -1, -1] = 1.0
weights_vec = np.reshape(weights_vec, (1, -1, 1))
return cudavec(x_seq_list, gpu_id=args.cuda).float(), \
cudavec(target_output, gpu_id=args.cuda).float(), \
cudavec(weights_vec, gpu_id=args.cuda)
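# Illustrative note (added for clarity, not part of the original file): for
# length=3 and size=6, generate_data returns a (1, 4, 6) one-hot input whose
# last step is the query symbol onehot(size - 1), a (1, 4, 1) target holding the
# index of the largest input value at its final position, and a (1, 4, 1) weight
# vector that is 1 only at that final position.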
if __name__ == '__main__':
dirname = os.path.dirname(__file__)
ckpts_dir = os.path.join(dirname, 'checkpoints')
input_size = args.input_size
memory_type = args.memory_type
lr = args.lr
clip = args.clip
batch_size = args.batch_size
sequence_max_length = args.sequence_max_length
cuda = args.cuda
iterations = args.iterations
summarize_freq = args.summarize_freq
check_freq = args.check_freq
visdom = args.visdom
from_checkpoint = None
if args.memory_type == 'dnc':
rnn = DNC(
input_size=args.input_size,
hidden_size=args.nhid,
rnn_type=args.rnn_type,
num_layers=args.nlayer,
num_hidden_layers=args.nhlayer,
dropout=args.dropout,
nr_cells=args.mem_slot,
cell_size=args.mem_size,
read_heads=args.read_heads,
gpu_id=args.cuda,
debug=args.visdom,
batch_first=True,
independent_linears=False
)
elif args.memory_type == 'sdnc':
rnn = SDNC(
input_size=args.input_size,
hidden_size=args.nhid,
rnn_type=args.rnn_type,
num_layers=args.nlayer,
num_hidden_layers=args.nhlayer,
dropout=args.dropout,
nr_cells=args.mem_slot,
cell_size=args.mem_size,
sparse_reads=args.sparse_reads,
temporal_reads=args.temporal_reads,
read_heads=args.read_heads,
gpu_id=args.cuda,
debug=args.visdom,
batch_first=True,
independent_linears=False
)
elif args.memory_type == 'sam':
rnn = SAM(
input_size=args.input_size,
hidden_size=args.nhid,
rnn_type=args.rnn_type,
num_layers=args.nlayer,
num_hidden_layers=args.nhlayer,
dropout=args.dropout,
nr_cells=args.mem_slot,
cell_size=args.mem_size,
sparse_reads=args.sparse_reads,
read_heads=args.read_heads,
gpu_id=args.cuda,
debug=args.visdom,
batch_first=True,
independent_linears=False
)
else:
raise Exception('Not recognized type of memory')
if args.cuda != -1:
rnn = rnn.cuda(args.cuda)
print(rnn)
last_save_losses = []
if args.optim == 'adam':
optimizer = optim.Adam(rnn.parameters(), lr=args.lr, eps=1e-9, betas=[0.9, 0.98]) # 0.0001
elif args.optim == 'adamax':
optimizer = optim.Adamax(rnn.parameters(), lr=args.lr, eps=1e-9, betas=[0.9, 0.98]) # 0.0001
elif args.optim == 'rmsprop':
optimizer = optim.RMSprop(rnn.parameters(), lr=args.lr, momentum=0.9, eps=1e-10) # 0.0001
elif args.optim == 'sgd':
optimizer = optim.SGD(rnn.parameters(), lr=args.lr) # 0.01
elif args.optim == 'adagrad':
optimizer = optim.Adagrad(rnn.parameters(), lr=args.lr)
elif args.optim == 'adadelta':
optimizer = optim.Adadelta(rnn.parameters(), lr=args.lr)
last_100_losses = []
(chx, mhx, rv) = (None, None, None)
for epoch in range(iterations + 1):
llprint("\rIteration {ep}/{tot}".format(ep=epoch, tot=iterations))
optimizer.zero_grad()
        # Train on sequences of random length between 2 and sequence_max_length
random_length = np.random.randint(2, (sequence_max_length) + 1)
input_data, target_output, loss_weights = generate_data(random_length, input_size)
if rnn.debug:
output, (chx, mhx, rv), v = rnn(input_data, (None, mhx, None), reset_experience=True, pass_through_memory=True)
else:
output, (chx, mhx, rv) = rnn(input_data, (None, mhx, None), reset_experience=True, pass_through_memory=True)
loss = T.mean(((loss_weights * output).sum(-1, keepdim=True) - target_output) ** 2)
loss.backward()
T.nn.utils.clip_grad_norm(rnn.parameters(), args.clip)
optimizer.step()
loss_value = loss.data[0]
# detach memory from graph
mhx = { k : (v.detach() if isinstance(v, var) else v) for k, v in mhx.items() }
summarize = (epoch % summarize_freq == 0)
take_checkpoint = (epoch != 0) and (epoch % iterations == 0)
last_100_losses.append(loss_value)
try:
if summarize:
output = (loss_weights * output).sum().data.cpu().numpy()[0]
target_output = target_output.sum().data.cpu().numpy()
llprint("\rIteration %d/%d" % (epoch, iterations))
                llprint("\nAvg. Loss: %.4f\n" % (np.mean(last_100_losses)))
print(target_output)
print("Real value: ", ' = ' + str(int(target_output[0])))
print("Predicted: ", ' = ' + str(int(output // 1)) + " [" + str(output) + "]")
last_100_losses = []
if take_checkpoint:
llprint("\nSaving Checkpoint ... "),
check_ptr = os.path.join(ckpts_dir, 'step_{}.pth'.format(epoch))
cur_weights = rnn.state_dict()
T.save(cur_weights, check_ptr)
llprint("Done!\n")
except Exception as e:
pass
llprint("\nTesting generalization...\n")
rnn.eval()
for i in range(int((iterations + 1) / 10)):
llprint("\nIteration %d/%d" % (i, iterations))
        # Test generalization on sequences of random length up to 2 * sequence_max_length
random_length = np.random.randint(2, sequence_max_length * 2 + 1)
input_data, target_output, loss_weights = generate_data(random_length, input_size)
if rnn.debug:
output, (chx, mhx, rv), v = rnn(input_data, (None, mhx, None), reset_experience=True, pass_through_memory=True)
else:
output, (chx, mhx, rv) = rnn(input_data, (None, mhx, None), reset_experience=True, pass_through_memory=True)
output = output[:, -1, :].sum().data.cpu().numpy()[0]
target_output = target_output.sum().data.cpu().numpy()
try:
print("\nReal value: ", ' = ' + str(int(target_output[0])))
print("Predicted: ", ' = ' + str(int(output // 1)) + " [" + str(output) + "]")
except Exception as e:
pass
| [
"torch.save",
"torch.manual_seed"
] | 0.2.0 | kierkegaard13/pytorch-dnc | b21a705ccb88f72fe3723e3c1e1f2bbe741f01b6 |
0.1 | import torch
import torch.nn as nn
from gneiss.cluster import random_linkage
from gneiss.balances import sparse_balance_basis
from scipy.sparse import coo_matrix
import numpy as np
from torch.distributions import Multinomial, Normal
from catvae.composition import ilr
from catvae.distributions.mvn import MultivariateNormalFactorIdentity
from typing import Callable
import warnings
LOG_2_PI = np.log(2.0 * np.pi)
class LinearCatVAE(nn.Module):
def __init__(self, input_dim: int, hidden_dim: int,
init_scale: float = 0.001,
basis: coo_matrix = None,
encoder_depth: int = 1,
imputer: Callable[[torch.Tensor], torch.Tensor] = None,
batch_size: int = 10, bias: bool = True):
super(LinearCatVAE, self).__init__()
warnings.warn('LinearCatVAE is going to be deprecated in a '
'future version of this package', DeprecationWarning)
self.initialize(input_dim, hidden_dim, init_scale,
basis, encoder_depth, imputer,
batch_size, bias)
def initialize(self, input_dim: int, hidden_dim: int,
init_scale: float = 0.001,
basis: coo_matrix = None,
encoder_depth: int = 1,
imputer: Callable[[torch.Tensor], torch.Tensor] = None,
batch_size: int = 10, bias: bool = True):
self.hidden_dim = hidden_dim
self.bias = bias
# Psi must be dimension D - 1 x D
if basis is None:
tree = random_linkage(input_dim)
basis = sparse_balance_basis(tree)[0].copy()
indices = np.vstack((basis.row, basis.col))
Psi = torch.sparse_coo_tensor(
indices.copy(), basis.data.astype(np.float32).copy(),
requires_grad=False)
# Psi.requires_grad = False
self.input_dim = Psi.shape[0]
if imputer is None:
self.imputer = lambda x: x + 1
else:
self.imputer = imputer
if encoder_depth > 1:
self.first_encoder = nn.Linear(
self.input_dim, hidden_dim, bias=self.bias)
num_encoder_layers = encoder_depth
layers = []
layers.append(self.first_encoder)
for layer_i in range(num_encoder_layers - 1):
layers.append(nn.Softplus())
layers.append(
nn.Linear(hidden_dim, hidden_dim, bias=self.bias))
self.encoder = nn.Sequential(*layers)
# initialize
for encoder_layer in self.encoder:
if isinstance(encoder_layer, nn.Linear):
encoder_layer.weight.data.normal_(0.0, init_scale)
else:
self.encoder = nn.Linear(self.input_dim, hidden_dim,
bias=self.bias)
self.encoder.weight.data.normal_(0.0, init_scale)
self.decoder = nn.Linear(hidden_dim, self.input_dim, bias=False)
self.variational_logvars = nn.Parameter(torch.zeros(hidden_dim))
self.log_sigma_sq = nn.Parameter(torch.tensor(0.01))
self.eta = nn.Parameter(torch.zeros(batch_size, self.input_dim))
self.eta.data.normal_(0.0, init_scale)
zI = torch.ones(self.hidden_dim).to(self.eta.device)
zm = torch.zeros(self.hidden_dim).to(self.eta.device)
self.register_buffer('Psi', Psi)
self.register_buffer('zI', zI)
self.register_buffer('zm', zm)
def encode(self, x):
hx = ilr(self.imputer(x), self.Psi)
z = self.encoder(hx)
return z
def forward(self, x):
hx = ilr(self.imputer(x), self.Psi)
z_mean = self.encoder(hx)
mu = self.decoder(z_mean)
W = self.decoder.weight
# penalties
D = torch.exp(self.variational_logvars)
var = torch.exp(self.log_sigma_sq)
qdist = MultivariateNormalFactorIdentity(mu, var, D, W)
logp = self.Psi.t() @ self.eta.t()
prior_loss = Normal(self.zm, self.zI).log_prob(z_mean).mean()
logit_loss = qdist.log_prob(self.eta).mean()
mult_loss = Multinomial(logits=logp.t()).log_prob(x).mean()
loglike = mult_loss + logit_loss + prior_loss
return -loglike
def reset(self, x):
hx = ilr(self.imputer(x), self.Psi)
self.eta.data = hx.data
def get_reconstruction_loss(self, x):
hx = ilr(self.imputer(x), self.Psi)
z_mean = self.encoder(hx)
eta = self.decoder(z_mean)
logp = self.Psi.t() @ eta.t()
mult_loss = Multinomial(logits=logp.t()).log_prob(x).mean()
return - mult_loss
class LinearBatchCatVAE(LinearCatVAE):
def __init__(self, input_dim: int, hidden_dim: int,
init_scale: float = 0.001,
basis: coo_matrix = None,
encoder_depth: int = 1,
imputer: Callable[[torch.Tensor], torch.Tensor] = None,
batch_size: int = 10, bias: bool = True):
super(LinearBatchCatVAE, self).__init__(
input_dim, hidden_dim, init_scale,
basis, encoder_depth, imputer,
batch_size, bias
)
def encode(self, x):
hx = ilr(self.imputer(x), self.Psi)
z = self.encoder(hx)
return z
def forward(self, x, B):
hx = ilr(self.imputer(x), self.Psi)
batch_effects = (self.Psi @ B.t()).t()
hx -= batch_effects # Subtract out batch effects
z_mean = self.encoder(hx)
mu = self.decoder(z_mean)
mu += batch_effects # Add batch effects back in
W = self.decoder.weight
# penalties
D = torch.exp(self.variational_logvars)
var = torch.exp(self.log_sigma_sq)
qdist = MultivariateNormalFactorIdentity(mu, var, D, W)
logp = self.Psi.t() @ self.eta.t()
prior_loss = Normal(self.zm, self.zI).log_prob(z_mean).mean()
logit_loss = qdist.log_prob(self.eta).mean()
mult_loss = Multinomial(logits=logp.t()).log_prob(x).mean()
loglike = mult_loss + logit_loss + prior_loss
return -loglike
def get_reconstruction_loss(self, x, B):
hx = ilr(self.imputer(x), self.Psi)
batch_effects = (self.Psi @ B.t()).t()
hx -= batch_effects # Subtract out batch effects
z_mean = self.encoder(hx)
eta = self.decoder(z_mean)
eta += batch_effects # Add batch effects back in
logp = self.Psi.t() @ eta.t()
mult_loss = Multinomial(logits=logp.t()).log_prob(x).mean()
return - mult_loss
| [
"torch.nn.Linear",
"torch.zeros",
"torch.nn.Sequential",
"torch.distributions.Normal",
"torch.ones",
"torch.tensor",
"torch.nn.Softplus",
"torch.exp"
] | 0.1.0 | flatironinstitute/catvae | 003a46682fc33e5b0d66c17e85e59e464a465c53 |
1.5 | import torch
import torch.nn.functional as F
from .base import Loss
from .base import Mode
from .base import Reduction
class CrossEntropyLoss(Loss):
"""
CE with optional smoothing and support for multiple positive labels.
Can accept one-hot encoded y_trues
Args:
mode (str): Metric mode {'binary', 'multiclass'}
'binary' - calculate binary cross entropy
'multiclass' - calculate categorical cross entropy
smoothing (float): How much to smooth values toward uniform
weight (Tensor): A manual rescaling weight given to each class.
If given, has to be a Tensor of size C. If `mode` is binary
weight should be weight of positive class
reduction (str): The reduction type to apply to the output. {'none', 'mean', 'sum'}.
NOTE: reduction is only supported for `binary` mode! for other modes it's always `mean`
'none' - no reduction will be applied
'sum' - the output will be summed
'mean' - the sum of the output will be divided by the number of elements in the output
from_logits (bool): If False assumes sigmoid has already been applied to model output
"""
def __init__(self, mode="multiclass", smoothing=0.0, weight=None, reduction="mean", from_logits=True):
super().__init__()
self.mode = Mode(mode)
self.reduction = Reduction(reduction)
self.confidence = 1.0 - smoothing
self.smoothing = smoothing
self.from_logits = from_logits
weight = torch.Tensor([1.0]) if weight is None else torch.tensor(weight)
self.register_buffer("weight", weight)
def forward(self, y_pred, y_true):
if self.mode == Mode.BINARY:
# squeeze to allow different shapes like BSx1xHxW vs BSxHxW
if self.from_logits:
loss = F.binary_cross_entropy_with_logits(
y_pred.squeeze(), y_true.squeeze(), pos_weight=self.weight, reduction=self.reduction.value
)
else:
loss = F.binary_cross_entropy( # no pos weight in this case
y_pred.squeeze(), y_true.squeeze(), reduction=self.reduction.value
)
if self.reduction == Reduction.NONE:
loss = loss.view(*y_pred.shape) # restore true shape
return loss
if len(y_true.shape) != 1:
y_true_one_hot = y_true.float()
else:
y_true_one_hot = torch.zeros_like(y_pred)
y_true_one_hot.scatter_(1, y_true.unsqueeze(1), 1.0)
y_pred = y_pred.float()
logprobs = F.log_softmax(y_pred, dim=1) if self.from_logits else y_pred.log()
# loss of each sample is weighted by it's target class
logprobs = logprobs * self.weight
sample_weights = self.weight * y_true_one_hot
# multiple labels handling
nll_loss = -logprobs * y_true_one_hot
nll_loss = nll_loss.sum(1)
smooth_loss = -logprobs.mean(dim=1)
loss = self.confidence * nll_loss + self.smoothing * smooth_loss
return loss.sum().div(sample_weights.sum())
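# Illustrative usage sketch (added for clarity, not part of the original file);
# shapes and values are assumptions chosen only for demonstration:
#   multiclass with label smoothing and integer targets:
#     criterion = CrossEntropyLoss(mode="multiclass", smoothing=0.1)
#     loss = criterion(torch.randn(8, 10), torch.randint(0, 10, (8,)))
#   binary segmentation logits with a positive-class weight:
#     criterion = CrossEntropyLoss(mode="binary", weight=2.0)
#     loss = criterion(torch.randn(8, 1, 32, 32),
#                      torch.randint(0, 2, (8, 32, 32)).float())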
| [
"torch.zeros_like",
"torch.Tensor",
"torch.tensor",
"torch.nn.functional.log_softmax"
] | 1.5 | vladserkoff/pytorch-tools | c838b5776b3d7e2d9d20c98432db400d5d842144 |
1.10 | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import torch
import functorch
from functorch import vmap
import torch.utils._pytree as pytree
from functorch_lagging_op_db import functorch_lagging_op_db
from functorch_additional_op_db import additional_op_db
from torch.testing._internal.common_methods_invocations import DecorateInfo
import os
import unittest
from torch.testing._internal.common_device_type import toleranceOverride
IS_FBCODE = os.getenv('FUNCTORCH_TEST_FBCODE') == '1'
def loop(op, in_dims, out_dim, batch_size, *batched_args, **kwarg_values):
outs = []
for idx in range(batch_size):
flat_args, args_spec = pytree.tree_flatten(batched_args)
flat_dims, dims_spec = pytree.tree_flatten(in_dims)
assert(args_spec == dims_spec)
new_args = [a.select(in_dim, idx) if in_dim is not None else a for a, in_dim in zip(flat_args, flat_dims)]
out = op(*pytree.tree_unflatten(new_args, args_spec), **kwarg_values)
outs.append(out)
loop_out = []
if isinstance(outs[0], torch.Tensor):
loop_out = torch.stack(outs)
else:
for idx in range(len(outs[0])):
loop_out.append(torch.stack([i[idx] for i in outs], out_dim))
return loop_out
def get_exhaustive_batched_inputs(arg_values, kwarg_values, batch_size=3, bdims=(0, -1), for_batch_norm=False):
assert bdims == (0,) or bdims == (0, -1)
def add_batch_dim(arg, bdim, batch_size=3):
assert bdim == 0 or bdim == -1
if isinstance(arg, torch.Tensor):
if bdim == 0:
shape = [1] * len(arg.shape)
shape.insert(bdim, batch_size)
return (arg.repeat(shape), bdim)
if bdim == -1:
arg = arg.unsqueeze(-1).expand(*arg.shape, batch_size).contiguous()
return (arg, bdim)
assert False
else:
return (arg, None)
for bdim in bdims:
batch_choices = []
def add_batch_choices(a):
if isinstance(a, torch.Tensor):
batched_val = add_batch_dim(a, bdim, batch_size)
batch_choices.append((batched_val, (a, None)))
else:
batch_choices.append(((a, None),))
flat_args, arg_spec = pytree.tree_flatten(tuple(arg_values))
if for_batch_norm:
# Batch norm is unique because the running_mean and running_var are updated in place.
# Therefore, they cannot be unbatched if the input is batched. The case where both are
# unbatched is added at the end
if len(flat_args) >= 3:
add_batch_choices(flat_args[0]) # input can be batched or unbatched
batch_choices.append((add_batch_dim(flat_args[1], bdim, batch_size),)) # running_mean must be batched
batch_choices.append((add_batch_dim(flat_args[2], bdim, batch_size),)) # running_var must be batched
orig_flat_args = flat_args
flat_args = orig_flat_args[3:]
else:
# TODO: None defaults in instance norm create empty tensors that are written to and mean that we must
# have unbatched inputs. None in the running mean/running var shouldn't make a tensor
batch_choices.append(((flat_args[0], None),)) # input must be unbatched
if len(flat_args) == 2:
batch_choices.append((add_batch_dim(flat_args[1], bdim, batch_size),))
orig_flat_args = flat_args
flat_args = []
for arg in flat_args:
add_batch_choices(arg)
for batched_values in itertools.product(*batch_choices):
batched_args, in_dims = zip(*batched_values)
if all([i is None for i in in_dims]):
continue
yield pytree.tree_unflatten(batched_args, arg_spec), pytree.tree_unflatten(in_dims, arg_spec), kwarg_values
if for_batch_norm and len(orig_flat_args) >= 2:
# Adds the case where input, running_mean, and running_var are all unbatched
batch_choices[0] = ((orig_flat_args[0], None),)
batch_choices[1] = ((orig_flat_args[1], None),)
if len(orig_flat_args) >= 3:
batch_choices[2] = ((orig_flat_args[2], None),)
for batched_values in itertools.product(*batch_choices):
batched_args, in_dims = zip(*batched_values)
if all([i is None for i in in_dims]):
continue
batched_args_tuple = pytree.tree_unflatten(batched_args, arg_spec)
in_dims_tuple = pytree.tree_unflatten(in_dims, arg_spec)
yield batched_args_tuple, in_dims_tuple, kwarg_values
def get_exhaustive_batched_inputs_for_batch_norm(arg_values, kwarg_values, batch_size=3, bdims=(0, -1)):
return get_exhaustive_batched_inputs(arg_values, kwarg_values,
batch_size=batch_size, bdims=bdims, for_batch_norm=True)
def get_fallback_and_vmap_exhaustive(op, arg_values, kwarg_values, opinfo=None, compute_loop_out=True, bdims=(0, -1)):
out_dim = 0
batch_size = 4
generator = get_exhaustive_batched_inputs(arg_values, kwarg_values, batch_size, bdims=bdims)
batch_norm_fns = ("nn.functional.batch_norm", "nn.functional.instance_norm") # instance norm calls batch norm
if opinfo is not None and opinfo.name in batch_norm_fns:
generator = get_exhaustive_batched_inputs_for_batch_norm(arg_values, kwarg_values, batch_size, bdims=bdims)
for batched_args, in_dims, kwarg_values in generator:
if compute_loop_out:
loop_out = loop(op, in_dims, out_dim, batch_size, *batched_args, **kwarg_values)
else:
loop_out = None
# Used for debugging the resulting operations
# from functorch import make_fx
# def f(a):
# return op(a)
# t = make_fx(vmap(f, in_dims=in_dims, out_dims=out_dim))(*batched_args, **kwarg_values)
# print(in_dims, [arg.shape for arg in batched_args], kwarg_values)
batched_out = vmap(op, in_dims=in_dims, out_dims=out_dim)(*batched_args, **kwarg_values)
yield (loop_out, batched_out)
# Tests case where we dispatch to a batching rule with no bdims
# This should be handled by autogenerated plumbing. For vmap support
# added via a manual plumbing you may need to handle this specially.
def add_bdim_if_tensor(x):
if isinstance(x, torch.Tensor):
return x.unsqueeze(1)
return x
def f(dummy, *args, **kwargs):
return op(*args, **kwargs)
dummy = torch.ones(batch_size, 1)
expected = pytree.tree_map(add_bdim_if_tensor, batched_out)
inner_in_dims = (0,) + pytree.tree_map(lambda x: None, in_dims)
outer_in_dims = (0,) + in_dims
output = vmap(vmap(f, inner_in_dims), outer_in_dims)(dummy, *batched_args, **kwarg_values)
yield (expected, output)
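# Illustrative usage sketch (added for clarity, not part of the original file):
# each yielded pair can be compared directly in a test, e.g.
#   for loop_out, batched_out in get_fallback_and_vmap_exhaustive(
#           torch.sin, (torch.randn(3, 5),), {}):
#       assert torch.allclose(loop_out, batched_out)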
def opinfo_in_dict(opinfo, d):
return (opinfo.name in d) or (f'{opinfo.name}.{opinfo.variant_test_name}' in d)
def xfail(op_name, variant_name=None, *, device_type=None, dtypes=None):
return (op_name, variant_name, device_type, dtypes, True)
# TODO: this doesn't work in python < 3.8
def skip(op_name, variant_name=None, *, device_type=None, dtypes=None):
return (op_name, variant_name, device_type, dtypes, False)
def skipOps(test_case_name, base_test_name, to_skip):
all_opinfos = functorch_lagging_op_db + additional_op_db
for xfail in to_skip:
op_name, variant_name, device_type, dtypes, expected_failure = xfail
if variant_name is None:
# match all variants
matching_opinfos = [o for o in all_opinfos if o.name == op_name]
assert len(matching_opinfos) >= 1, f"Couldn't find OpInfo for {xfail}"
else:
matching_opinfos = [o for o in all_opinfos
if o.name == op_name and o.variant_test_name == variant_name]
assert len(matching_opinfos) >= 1, f"Couldn't find OpInfo for {xfail}"
for opinfo in matching_opinfos:
decorators = list(opinfo.decorators)
if expected_failure:
decorator = DecorateInfo(unittest.expectedFailure,
test_case_name, base_test_name,
device_type=device_type, dtypes=dtypes)
decorators.append(decorator)
else:
decorator = DecorateInfo(unittest.skip("Skipped!"),
test_case_name, base_test_name,
device_type=device_type, dtypes=dtypes)
decorators.append(decorator)
opinfo.decorators = tuple(decorators)
# This decorator doesn't modify fn in any way
def wrapped(fn):
return fn
return wrapped
def tol2(op_name, variant_name, override_dct, *, device_type=None):
return (op_name, variant_name, override_dct, device_type)
def tol1(op_name, override_dct, *, device_type=None):
return tol2(op_name, '', override_dct, device_type=device_type)
def opsToleranceOverride(test_case_name, base_test_name, overrides):
all_opinfos = functorch_lagging_op_db + additional_op_db
for override in overrides:
op_name, variant_name, override, device_type = override
matching_opinfos = [o for o in all_opinfos
if o.name == op_name and o.variant_test_name == variant_name]
assert len(matching_opinfos) == 1, f"Couldn't find OpInfo for {override}"
opinfo = matching_opinfos[0]
decorators = list(opinfo.decorators)
decorators.append(DecorateInfo(
toleranceOverride(override),
test_case_name, base_test_name, device_type=device_type))
opinfo.decorators = tuple(decorators)
# This decorator doesn't modify fn in any way
def wrapped(fn):
return fn
return wrapped
class DisableVmapFallback:
def __enter__(self):
self.prev_state = functorch._C._is_vmap_fallback_enabled()
functorch._C._set_vmap_fallback_enabled(False)
def __exit__(self, *ignored):
functorch._C._set_vmap_fallback_enabled(self.prev_state)
def check_vmap_fallback(test_case, thunk, opinfo, dry_run=False):
try:
with DisableVmapFallback():
thunk()
except Exception:
if not dry_run:
raise
if opinfo.variant_test_name:
print(f"xfail('{opinfo.name}', '{opinfo.variant_test_name}'),")
else:
print(f"xfail('{opinfo.name}'),")
| [
"torch.testing._internal.common_methods_invocations.DecorateInfo",
"torch.stack",
"torch.utils._pytree.tree_map",
"torch.ones",
"torch.utils._pytree.tree_unflatten",
"torch.testing._internal.common_device_type.toleranceOverride",
"torch.utils._pytree.tree_flatten"
] | 1.10.0 | ConnollyLeon/functorch | 0a7b3dff269148ceac831194f3358452b18b6e19 |
1.7 | # coding=utf-8
# Copyright 2021, The Facebook AI Research Team and The HuggingFace Inc. team. All rights reserved.
# Copyright 2021, National Institute of Information and Communication Technology (Raj Dabre)
# Modified portions by Raj Dabre are indicated as so.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch MBART model. """
import copy
import math
import random
from typing import Optional, Tuple
import torch
import numpy as np
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
## Modified by Raj Dabre. Start.
from torch.autograd import Function
from mixture_of_experts import MoE
## Modified by Raj Dabre. End.
from ...activations import ACT2FN
from ...file_utils import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
Seq2SeqQuestionAnsweringModelOutput,
Seq2SeqSequenceClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_mbart import MBartConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "MBartConfig"
_TOKENIZER_FOR_DOC = "MBartTokenizer"
MBART_PRETRAINED_MODEL_ARCHIVE_LIST = [
"facebook/mbart-large-cc25",
# See all MBART models at https://huggingface.co/models?filter=mbart
]
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int):
"""
Shift input ids one token to the right, and wrap the last non pad token (the <LID> token) Note that MBart does not
have a single `decoder_start_token_id` in contrast to other Bart-like models.
"""
prev_output_tokens = input_ids.clone()
assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined."
# replace possible -100 values in labels by `pad_token_id`
prev_output_tokens.masked_fill_(prev_output_tokens == -100, pad_token_id)
index_of_eos = (prev_output_tokens.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
decoder_start_tokens = prev_output_tokens.gather(1, index_of_eos).squeeze()
prev_output_tokens[:, 1:] = prev_output_tokens[:, :-1].clone()
prev_output_tokens[:, 0] = decoder_start_tokens
return prev_output_tokens
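# Illustrative example (added for clarity, not part of the original file), with
# pad_token_id = 1 and the language-id token (here 250004) as the last non-pad token:
#   input_ids = [[    47, 52,  2, 250004,      1]]
#   output    = [[250004, 47, 52,      2, 250004]]
# i.e. the language-id token is wrapped around to become the decoder start token.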
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0):
"""
    Make the causal (lower-triangular) mask used for uni-directional decoder self-attention.
"""
bsz, tgt_len = input_ids_shape
    mask = torch.full((tgt_len, tgt_len), -1e10) ## Changed from float("-inf") to -1e10. ## Modified by Raj Dabre.
mask_cond = torch.arange(mask.size(-1))
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
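# Illustrative example (added for clarity, not part of the original file): for
# tgt_len = 3 and past_key_values_length = 0, each (3, 3) slice of the returned
# mask is
#   [[    0, -1e10, -1e10],
#    [    0,     0, -1e10],
#    [    0,     0,     0]]
# so position i can only attend to positions <= i.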
## Modified by Raj Dabre. Start.
# Copied from transformers.models.bart.modeling_bart._expand_mask
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None, wait_k: Optional[int] = -1, curr_decode_length: Optional[int] = -1):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
if wait_k != -1:
if curr_decode_length == -1:
expanded_mask = torch.tril(expanded_mask, wait_k-1) ## This causes the attention mask to be lower triangular to mask future tokens. If wait-k is k then the diagonal shift should be k-1.
else:
expanded_mask = torch.tril(expanded_mask, (curr_decode_length-1) + (wait_k-1)) ## This causes the attention mask to be lower triangular to mask future tokens. If wait-k is k then the diagonal shift should be k-1. This is used during decoding time as tgt_len will always be 1 so we need to shift the triangle by an appropriate amount.
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.bool(), -1e10) # torch.finfo(dtype).min
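# Illustrative example (added for clarity only): with wait_k = 2 during training (curr_decode_length == -1) and
# tgt_len = src_len = 4, torch.tril(expanded_mask, 1) lets target position i attend to source positions <= i + 1,
# i.e. the first target token sees 2 source tokens, the second sees 3, and so on.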
def cast_tuple(el):
return el if isinstance(el, tuple) else (el,)
class GELU_(nn.Module):
def forward(self, x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
class Experts(nn.Module):
def __init__(self,
dim,
num_experts = 8,
hidden_dim = 128,
activation = ACT2FN['gelu'],
activation_dropout = 0.0,
std = 0.2):
super().__init__()
num_experts = cast_tuple(num_experts)
w1 = torch.zeros(*num_experts, dim, hidden_dim)
w2 = torch.zeros(*num_experts, hidden_dim, dim)
w1.normal_(mean=0.0, std=std)
w2.normal_(mean=0.0, std=std)
self.w1 = nn.Parameter(w1)
self.w2 = nn.Parameter(w2)
self.act = activation
self.act_drop = activation_dropout
def forward(self, x):
hidden = torch.einsum('...nd,...dh->...nh', x, self.w1)
hidden = F.dropout(self.act(hidden), p=self.act_drop, training=self.training)
out = torch.einsum('...nh,...hd->...nd', hidden, self.w2)
return out
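# Shape sketch for Experts.forward (an illustrative reading of the einsums above, assuming a single expert group):
#   x  : (num_experts, n, dim)          w1 : (num_experts, dim, hidden_dim)
#   out: (num_experts, n, dim)          w2 : (num_experts, hidden_dim, dim)
# i.e. each expert applies its own two-layer feed-forward block to the n tokens routed to it.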
class MBartSinusoidalPositionalEmbedding(nn.Embedding):
"""This module produces sinusoidal positional embeddings of any length."""
def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None):
super().__init__(num_positions, embedding_dim)
self.weight = self._init_weight(self.weight)
@staticmethod
def _init_weight(out: nn.Parameter):
"""
Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
the 2nd half of the vector. [dim // 2:]
"""
n_pos, dim = out.shape
position_enc = np.array(
[[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
)
out.requires_grad = False # set early to avoid an error in pytorch-1.8+
sentinel = dim // 2 if dim % 2 == 0 else (dim // 2) + 1
out[:, 0:sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
out.detach_()
return out
@torch.no_grad()
def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0):
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
bsz, seq_len = input_ids_shape[:2]
positions = torch.arange(
past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
)
return super().forward(positions)
## Modified by Raj Dabre. End.
# Copied from transformers.models.bart.modeling_bart.BartLearnedPositionalEmbedding with Bart->MBart
class MBartLearnedPositionalEmbedding(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
"""
def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int):
        assert padding_idx is not None, "`padding_idx` should not be None; it must be an int"
        # MBart is set up so that if padding_idx is specified then offset the embedding ids by 2
        # and adjust num_embeddings appropriately. Other models don't have this hack
self.offset = 2
super().__init__(num_embeddings + self.offset, embedding_dim, padding_idx=padding_idx)
def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0):
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
bsz, seq_len = input_ids_shape[:2]
positions = torch.arange(
past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
)
return super().forward(positions + self.offset)
# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->MBart
class MBartAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
multi_source_method = None,
no_scale_attention_embedding = False,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads})."
self.scaling = self.head_dim ** -0.5 if not no_scale_attention_embedding else 1.0
self.is_decoder = is_decoder
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
## Modified by Raj Dabre. Start.
if multi_source_method == "merge_after_attention" or multi_source_method == "self_relevance_and_merge_after_attention" or multi_source_method == "self_relevance_and_merge_after_attention_with_context_relevance_only" or multi_source_method == "merge_after_attention_with_context_relevance_only" or multi_source_method == "mid_fusion_merge_after_attention" or multi_source_method == "bottleneck_mid_fusion_merge_after_attention": ## We pass the attentions through a gating method. X and Y are combined as w*x+(1-w)*Y where w=sigmoid(W[X:Y]) where [X:Y] is the concatenation of X and Y along hidden axis.
if multi_source_method == "merge_after_attention" or multi_source_method == "self_relevance_and_merge_after_attention" or multi_source_method == "mid_fusion_merge_after_attention" or multi_source_method == "bottleneck_mid_fusion_merge_after_attention":
self.gating_layer = nn.Linear(2*self.head_dim, self.head_dim, bias=False)
else:
self.gating_layer = nn.Linear(self.head_dim, self.head_dim, bias=False)
self.multi_source = True
self.multi_source_method = multi_source_method
else:
self.multi_source = False
self.multi_source_method = ""
## Modified by Raj Dabre. End.
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
additional_key_value_states: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
additional_past_key_value: Optional[Tuple[torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
additional_attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
prompt_params = None,
adaptor_or_prompt_layer_idx = 0,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, embed_dim = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
## Modified by Raj Dabre. Start.
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
if self.multi_source: # additional_past_key_value is not None
additional_key_states = additional_past_key_value[0]
additional_value_states = additional_past_key_value[1]
        ## Modified by Raj Dabre. End.
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
if prompt_params is not None:
prompt_params_expanded = self._shape(prompt_params[0][adaptor_or_prompt_layer_idx], -1, bsz)
key_states = torch.cat([prompt_params_expanded, key_states], dim=2)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
if prompt_params is not None:
prompt_params_expanded = self._shape(prompt_params[1][adaptor_or_prompt_layer_idx], -1, bsz)
value_states = torch.cat([prompt_params_expanded, value_states], dim=2)
## Modified by Raj Dabre. Start.
if self.multi_source: # additional_past_key_value is not None
additional_key_states = self._shape(self.k_proj(additional_key_value_states), -1, bsz)
if prompt_params is not None:
prompt_params_expanded = self._shape(prompt_params[0][adaptor_or_prompt_layer_idx], -1, bsz)
additional_key_states = torch.cat([prompt_params_expanded, additional_key_states], dim=2)
additional_value_states = self._shape(self.v_proj(additional_key_value_states), -1, bsz)
if prompt_params is not None:
prompt_params_expanded = self._shape(prompt_params[1][adaptor_or_prompt_layer_idx], -1, bsz)
additional_value_states = torch.cat([prompt_params_expanded, additional_value_states], dim=2)
## Modified by Raj Dabre. End.
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = torch.cat([past_key_value[0], key_states], dim=2)
value_states = torch.cat([past_key_value[1], value_states], dim=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
if prompt_params is not None:
prompt_params_expanded = self._shape(prompt_params[0][adaptor_or_prompt_layer_idx], -1, bsz)
key_states = torch.cat([prompt_params_expanded, key_states], dim=2)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if prompt_params is not None:
prompt_params_expanded = self._shape(prompt_params[1][adaptor_or_prompt_layer_idx], -1, bsz)
value_states = torch.cat([prompt_params_expanded, value_states], dim=2)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states, value_states)
## Modified by Raj Dabre. Start.
            if self.multi_source and is_cross_attention: ## The is_cross_attention check is redundant since multi-source logic only runs during cross attention; multi_source alone would suffice, but both are kept as a sanity check.
additional_past_key_value = (additional_key_states, additional_value_states)
## Modified by Raj Dabre. End.
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
assert attn_weights.size() == (
bsz * self.num_heads,
tgt_len,
src_len,
), f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}"
if attention_mask is not None:
assert attention_mask.size() == (
bsz,
1,
tgt_len,
src_len,
), f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = F.softmax(attn_weights, dim=-1)
## Modified by Raj Dabre. Start.
if self.multi_source:
additional_key_states = additional_key_states.view(*proj_shape)
additional_value_states = additional_value_states.view(*proj_shape)
additional_src_len = additional_key_states.size(1)
additional_attn_weights = torch.bmm(query_states, additional_key_states.transpose(1, 2))
assert additional_attn_weights.size() == (
bsz * self.num_heads,
tgt_len,
additional_src_len,
), f"Additional attention weights should be of size {(bsz * self.num_heads, tgt_len, additional_src_len)}, but is {additional_attn_weights.size()}"
if additional_attention_mask is not None:
assert additional_attention_mask.size() == (
bsz,
1,
tgt_len,
additional_src_len,
), f"Attention mask should be of size {(bsz, 1, tgt_len, additional_src_len)}, but is {additional_attention_mask.size()}"
additional_attn_weights = additional_attn_weights.view(bsz, self.num_heads, tgt_len, additional_src_len) + additional_attention_mask
additional_attn_weights = additional_attn_weights.view(bsz * self.num_heads, tgt_len, additional_src_len)
additional_attn_weights = F.softmax(additional_attn_weights, dim=-1)
## Modified by Raj Dabre. End.
if layer_head_mask is not None:
assert layer_head_mask.size() == (
self.num_heads,
), f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}"
attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
## Modified by Raj Dabre. Start.
if self.multi_source:
additional_attn_weights = layer_head_mask.view(1, -1, 1, 1) * additional_attn_weights.view(bsz, self.num_heads, tgt_len, additional_src_len)
additional_attn_weights = additional_attn_weights.view(bsz * self.num_heads, tgt_len, additional_src_len)
## Modified by Raj Dabre. End.
if output_attentions:
            # this operation is a bit awkward, but it's required to
            # make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights has to be reshaped
            # twice and has to be reused in the following
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
## Modified by Raj Dabre. Start.
if self.multi_source:
additional_attn_weights_reshaped = additional_attn_weights.view(bsz, self.num_heads, tgt_len, additional_src_len)
additional_attn_weights = additional_attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, additional_src_len)
## Modified by Raj Dabre. End.
else:
attn_weights_reshaped = None
## Modified by Raj Dabre. Start.
if self.multi_source:
additional_attn_weights_reshaped = None
## Modified by Raj Dabre. End.
attn_probs = F.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
assert attn_output.size() == (
bsz * self.num_heads,
tgt_len,
self.head_dim,
), f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}"
## Modified by Raj Dabre. Start.
if self.multi_source:
additional_attn_probs = F.dropout(additional_attn_weights, p=self.dropout, training=self.training)
additional_attn_output = torch.bmm(additional_attn_probs, additional_value_states)
assert additional_attn_output.size() == (
bsz * self.num_heads,
tgt_len,
self.head_dim,
), f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {additional_attn_output.size()}"
if self.multi_source_method == "merge_after_attention" or self.multi_source_method == "self_relevance_and_merge_after_attention" or self.multi_source_method == "mid_fusion_merge_after_attention" or self.multi_source_method == "bottleneck_mid_fusion_merge_after_attention":
attentions_merged = torch.cat([attn_output, additional_attn_output], -1) ## Concatenate along hidden axis.
gating_weight = torch.sigmoid(self.gating_layer(attentions_merged)) ## Compute gating weight.
attn_output = gating_weight*attn_output + (1.0-gating_weight)*additional_attn_output ## Combine attentions.
else:
context_self_relevance_weight = torch.sigmoid(self.gating_layer(additional_attn_output)) ## Compute gating weight.
attn_output = attn_output + context_self_relevance_weight*additional_attn_output ## Combine attentions.
## Modified by Raj Dabre. End.
attn_output = (
attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
.transpose(1, 2)
.reshape(bsz, tgt_len, embed_dim)
)
attn_output = self.out_proj(attn_output)
## Modified by Raj Dabre. Start.
if self.multi_source:
return attn_output, attn_weights_reshaped, additional_attn_weights_reshaped, past_key_value, additional_past_key_value
else:
return attn_output, attn_weights_reshaped, past_key_value
## Modified by Raj Dabre. End.
class MBartEncoderLayer(nn.Module):
def __init__(self, config: MBartConfig):
super().__init__()
self.embed_dim = config.d_model
self.config = config
moe_loss = () if self.config.use_moe else None
self.self_attn = MBartAttention(
embed_dim=self.embed_dim,
num_heads=config.encoder_attention_heads,
dropout=config.attention_dropout,
no_scale_attention_embedding=config.no_scale_attention_embedding,
        ) ## Note: an if-else here could return either the self-attention module or an FFT mixing layer; the FFT variant would be implemented via a method that pre-generates a set of matrices and returns a closure which picks the right matrix at runtime.
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
if config.use_moe:
print("Using Mixtures of Experts")
experts = Experts(dim = self.embed_dim,
num_experts = config.num_experts,
hidden_dim = config.expert_ffn_size,
activation = ACT2FN[config.activation_function],
activation_dropout = self.activation_dropout,
std = config.init_std)
self.moe = MoE(
dim = self.embed_dim,
num_experts = config.num_experts,
hidden_dim = config.expert_ffn_size,
second_policy_train = 'random',
second_policy_eval = 'random',
second_threshold_train = 0.2,
second_threshold_eval = 0.2,
capacity_factor_train = 1.25,
capacity_factor_eval = 2.,
loss_coef = 1e-2,
experts = experts
)
else:
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
layer_head_mask: torch.Tensor,
output_attentions: bool = False,
prompt_params = None,
adaptor_layers = None,
deep_adaptor_tuning = False,
deep_adaptor_tuning_ffn_only = False,
parallel_adaptors=False,
adaptor_or_prompt_layer_idx = 0,
):
"""
Args:
hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
attention_mask (:obj:`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size
`(config.encoder_attention_heads,)`.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, attn_weights, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
output_attentions=output_attentions,
prompt_params=prompt_params,
adaptor_or_prompt_layer_idx=adaptor_or_prompt_layer_idx,
)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
if adaptor_layers is not None and deep_adaptor_tuning: # Apply adaptor layer to current layer's output.
if parallel_adaptors:
adaptor_input = residual
else:
adaptor_input = hidden_states
adaptor_output = adaptor_layers(adaptor_input, True, adaptor_or_prompt_layer_idx*2)
if parallel_adaptors:
hidden_states = adaptor_output + hidden_states
else:
hidden_states = adaptor_output
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
if self.config.use_moe:
hidden_states, moe_loss = self.moe(hidden_states)
else:
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = F.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
if adaptor_layers is not None:
if parallel_adaptors:
adaptor_input = residual
else:
adaptor_input = hidden_states
if deep_adaptor_tuning: # Apply adaptor layer to current layer's output.
adaptor_output = adaptor_layers(adaptor_input, True, adaptor_or_prompt_layer_idx*2+1)
elif deep_adaptor_tuning_ffn_only:
adaptor_output = adaptor_layers(adaptor_input, True, adaptor_or_prompt_layer_idx)
if parallel_adaptors:
hidden_states = adaptor_output + hidden_states
else:
hidden_states = adaptor_output
if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
if self.config.use_moe:
outputs = ([hidden_states, moe_loss],)
else:
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
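# Adaptor indexing sketch (summarising the calls above, added for clarity): with deep_adaptor_tuning, encoder layer idx
# uses adaptor sub-layers 2*idx (after self-attention) and 2*idx + 1 (after the feed-forward block), so N encoder
# layers consume 2*N adaptor sub-layers; with deep_adaptor_tuning_ffn_only only sub-layer idx is used, once per layer.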
class MBartDecoderLayer(nn.Module):
def __init__(self, config: MBartConfig):
super().__init__()
self.embed_dim = config.d_model
self.config = config
self.self_attn = MBartAttention(
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
no_scale_attention_embedding=config.no_scale_attention_embedding,
)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.encoder_attn = MBartAttention(
self.embed_dim,
config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
multi_source_method=config.multi_source_method,
no_scale_attention_embedding=config.no_scale_attention_embedding,
)
self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
if config.use_moe:
print("Using Mixtures of Experts")
experts = Experts(dim = self.embed_dim,
num_experts = config.num_experts,
hidden_dim = config.expert_ffn_size,
activation = ACT2FN[config.activation_function],
activation_dropout = self.activation_dropout,
std = config.init_std)
self.moe = MoE(
dim = self.embed_dim,
num_experts = config.num_experts,
hidden_dim = config.expert_ffn_size,
second_policy_train = 'random',
second_policy_eval = 'random',
second_threshold_train = 0.2,
second_threshold_eval = 0.2,
capacity_factor_train = 1.25,
capacity_factor_eval = 2.,
loss_coef = 1e-2,
experts = experts
)
else:
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
encoder_layer_head_mask: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = True,
additional_encoder_hidden_states: Optional[torch.Tensor] = None,
additional_encoder_attention_mask: Optional[torch.Tensor] = None,
prompt_params = None,
adaptor_layers = None,
deep_adaptor_tuning = False,
deep_adaptor_tuning_ffn_only = False,
parallel_adaptors=False,
adaptor_or_prompt_layer_idx = 0,
):
"""
Args:
hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
attention_mask (:obj:`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (:obj:`torch.FloatTensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_attention_mask (:obj:`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size
`(config.encoder_attention_heads,)`.
encoder_layer_head_mask (:obj:`torch.FloatTensor`): mask for encoder attention heads in a given layer of
size `(config.encoder_attention_heads,)`.
past_key_value (:obj:`Tuple(torch.FloatTensor)`): cached past key and value projection states
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Self Attention
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
# add present self-attn cache to positions 1,2 of present_key_value tuple
#print(attention_mask.size() if attention_mask is not None else 1, encoder_attention_mask.size() if encoder_attention_mask is not None else 1, additional_encoder_attention_mask.size() if additional_encoder_attention_mask is not None else 1)
hidden_states, self_attn_weights, present_key_value = self.self_attn(
hidden_states=hidden_states,
past_key_value=self_attn_past_key_value,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
output_attentions=output_attentions,
prompt_params=[prompt_params[0], prompt_params[1]] if prompt_params is not None else None,
adaptor_or_prompt_layer_idx=adaptor_or_prompt_layer_idx,
)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
if adaptor_layers is not None and deep_adaptor_tuning: # Apply adaptor layer to current layer's output.
if parallel_adaptors:
adaptor_input = residual
else:
adaptor_input = hidden_states
adaptor_output = adaptor_layers(adaptor_input, False, adaptor_or_prompt_layer_idx*3)
if parallel_adaptors:
hidden_states = adaptor_output + hidden_states
else:
hidden_states = adaptor_output
# Cross-Attention Block
cross_attn_present_key_value = None
cross_attn_weights = None
if self.config.multi_source and (self.config.multi_source_method == "merge_after_attention" or self.config.multi_source_method == "self_relevance_and_merge_after_attention" or self.config.multi_source_method == "merge_after_attention_with_context_relevance_only" or self.config.multi_source_method == "self_relevance_and_merge_after_attention_with_context_relevance_only" or self.config.multi_source_method == "mid_fusion_merge_after_attention" or self.config.multi_source_method == "bottleneck_mid_fusion_merge_after_attention"):
additional_cross_attn_weights = None
additional_cross_attn_present_key_value = None
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
## Modified by Raj Dabre. Start.
# cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
if self.config.multi_source and (self.config.multi_source_method == "merge_after_attention" or self.config.multi_source_method == "self_relevance_and_merge_after_attention" or self.config.multi_source_method == "merge_after_attention_with_context_relevance_only" or self.config.multi_source_method == "self_relevance_and_merge_after_attention_with_context_relevance_only" or self.config.multi_source_method == "mid_fusion_merge_after_attention" or self.config.multi_source_method == "bottleneck_mid_fusion_merge_after_attention"): ## This if else is not needed but keeping it that way for cleaner flow of logic.
cross_attn_past_key_value = past_key_value[-4:-2] if past_key_value is not None else None
additional_cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
hidden_states, cross_attn_weights, additional_cross_attn_weights, cross_attn_present_key_value, additional_cross_attn_present_key_value = self.encoder_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
additional_key_value_states=additional_encoder_hidden_states,
attention_mask=encoder_attention_mask,
additional_attention_mask=additional_encoder_attention_mask,
layer_head_mask=layer_head_mask, ## Should be none. Dont mess with this.
past_key_value=cross_attn_past_key_value,
additional_past_key_value=additional_cross_attn_past_key_value,
output_attentions=output_attentions, ## Should be false. Dont mess with this.
prompt_params=[prompt_params[2], prompt_params[3]] if prompt_params is not None else None,
adaptor_or_prompt_layer_idx=adaptor_or_prompt_layer_idx,
)
#print(hidden_states.size() if hidden_states is not None else 1, attention_mask.size() if attention_mask is not None else 1)
else:
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
layer_head_mask=layer_head_mask,
past_key_value=cross_attn_past_key_value,
output_attentions=output_attentions,
prompt_params=[prompt_params[2], prompt_params[3]] if prompt_params is not None else None,
adaptor_or_prompt_layer_idx=adaptor_or_prompt_layer_idx,
)
## Modified by Raj Dabre. End.
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
## Modified by Raj Dabre. Start.
# add cross-attn to positions 3,4 of present_key_value tuple
if self.config.multi_source and (self.config.multi_source_method == "merge_after_attention" or self.config.multi_source_method == "self_relevance_and_merge_after_attention" or self.config.multi_source_method == "merge_after_attention_with_context_relevance_only" or self.config.multi_source_method == "self_relevance_and_merge_after_attention_with_context_relevance_only" or self.config.multi_source_method == "mid_fusion_merge_after_attention" or self.config.multi_source_method == "bottleneck_mid_fusion_merge_after_attention"):
present_key_value = present_key_value + cross_attn_present_key_value + additional_cross_attn_present_key_value
else:
present_key_value = present_key_value + cross_attn_present_key_value ## Deal with the additional_cross_attn_present_key_value
## Modified by Raj Dabre. End.
if adaptor_layers is not None and deep_adaptor_tuning: # Apply adaptor layer to current layer's output.
if parallel_adaptors:
adaptor_input = residual
else:
adaptor_input = hidden_states
adaptor_output = adaptor_layers(adaptor_input, False, adaptor_or_prompt_layer_idx*3+1)
if parallel_adaptors:
hidden_states = adaptor_output + hidden_states
else:
hidden_states = adaptor_output
# Fully Connected
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
if self.config.use_moe:
hidden_states, moe_loss = self.moe(hidden_states)
else:
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = F.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
if adaptor_layers is not None:
if parallel_adaptors:
adaptor_input = residual
else:
adaptor_input = hidden_states
if deep_adaptor_tuning: # Apply adaptor layer to current layer's output.
adaptor_output = adaptor_layers(adaptor_input, False, adaptor_or_prompt_layer_idx*3+2)
elif deep_adaptor_tuning_ffn_only:
adaptor_output = adaptor_layers(adaptor_input, False, adaptor_or_prompt_layer_idx)
if parallel_adaptors:
hidden_states = adaptor_output + hidden_states
else:
hidden_states = adaptor_output
if self.config.use_moe:
outputs = ([hidden_states, moe_loss],)
else:
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
## Modified by Raj Dabre. Start.
if self.config.multi_source and (self.config.multi_source_method == "merge_after_attention" or self.config.multi_source_method == "self_relevance_and_merge_after_attention" or self.config.multi_source_method == "merge_after_attention_with_context_relevance_only" or self.config.multi_source_method == "self_relevance_and_merge_after_attention_with_context_relevance_only"):
outputs += (additional_cross_attn_weights,)
## Modified by Raj Dabre. End.
if use_cache:
outputs += (present_key_value,) ## Deal with the additional_cross_attn_present_key_value
return outputs
# Copied from transformers.models.bart.modeling_bart.BartClassificationHead with Bart->MBart
class MBartClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(
self,
input_dim: int,
inner_dim: int,
num_classes: int,
pooler_dropout: float,
):
super().__init__()
self.dense = nn.Linear(input_dim, inner_dim)
self.dropout = nn.Dropout(p=pooler_dropout)
self.out_proj = nn.Linear(inner_dim, num_classes)
def forward(self, hidden_states: torch.Tensor):
hidden_states = self.dropout(hidden_states)
hidden_states = self.dense(hidden_states)
hidden_states = torch.tanh(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.out_proj(hidden_states)
return hidden_states
class MBartPreTrainedModel(PreTrainedModel):
config_class = MBartConfig
base_model_prefix = "model"
def _init_weights(self, module):
std = self.config.init_std
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
@property
def dummy_inputs(self):
pad_token = self.config.pad_token_id
input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)
dummy_inputs = {
"attention_mask": input_ids.ne(pad_token),
"input_ids": input_ids,
}
return dummy_inputs
MBART_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
    methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.MBartConfig`):
Model configuration class with all the parameters of the model. Initializing with a config file does not
load the weights associated with the model, only the configuration. Check out the
:meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
MBART_GENERATION_EXAMPLE = r"""
Summarization example::
>>> from transformers import MBartTokenizer, MBartForConditionalGeneration, MBartConfig
>>> model = MBartForConditionalGeneration.from_pretrained('facebook/mbart-large-cc25')
>>> tokenizer = MBartTokenizer.from_pretrained('facebook/mbart-large-cc25')
>>> ARTICLE_TO_SUMMARIZE = "Meine Freunde sind cool, aber sie essen zu viel Kuchen."
>>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='pt')
>>> # Generate Summary
>>> summary_ids = model.generate(inputs['input_ids'], num_beams=4, max_length=5, early_stopping=True)
>>> print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])
Mask filling example::
>>> from transformers import MBartTokenizer, MBartForConditionalGeneration
>>> tokenizer = MBartTokenizer.from_pretrained('facebook/mbart-large-cc25')
>>> # de_DE is the language symbol id <LID> for German
>>> TXT = "</s> Meine Freunde sind <mask> nett aber sie essen zu viel Kuchen. </s> de_DE"
>>> model = MBartForConditionalGeneration.from_pretrained('facebook/mbart-large-cc25')
>>> input_ids = tokenizer([TXT], add_special_tokens=False, return_tensors='pt')['input_ids']
>>> logits = model(input_ids).logits
>>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()
>>> probs = logits[0, masked_index].softmax(dim=0)
>>> values, predictions = probs.topk(5)
>>> tokenizer.decode(predictions).split()
"""
MBART_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using :class:`~transformers.MBartTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
        decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.MBartTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
MBart uses a specific language id token as the starting token for :obj:`decoder_input_ids` generation that
varies according to source and target language, *e.g.* 25004 for `en_XX`, and 25003 for `de_DE`. If
:obj:`past_key_values` is used, optionally only the last :obj:`decoder_input_ids` have to be input (see
:obj:`past_key_values`).
For translation and summarization training, :obj:`decoder_input_ids` should be provided. If no
:obj:`decoder_input_ids` is provided, the model will create this tensor by shifting the :obj:`input_ids` to
the right for denoising pre-training following the paper.
decoder_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will
also be used by default.
If you want to change padding behavior, you should read :func:`modeling_mbart._prepare_decoder_inputs` and
modify to your needs. See diagram 1 in `the paper <https://arxiv.org/abs/1910.13461>`__ for more
information on the default strategy.
head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
decoder_head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`):
Tuple consists of (:obj:`last_hidden_state`, `optional`: :obj:`hidden_states`, `optional`:
:obj:`attentions`) :obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)`,
`optional`) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the
cross-attention of the decoder.
past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]` of length :obj:`config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids`` of shape :obj:`(batch_size, sequence_length)`.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
decoder_inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`decoder_input_ids` you can choose to directly pass an embedded
representation. If :obj:`past_key_values` is used, optionally only the last :obj:`decoder_inputs_embeds`
have to be input (see :obj:`past_key_values`). This is useful if you want more control over how to convert
:obj:`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
If :obj:`decoder_input_ids` and :obj:`decoder_inputs_embeds` are both unset, :obj:`decoder_inputs_embeds`
takes the value of :obj:`inputs_embeds`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
class MBartEncoder(MBartPreTrainedModel):
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
:class:`MBartEncoderLayer`.
Args:
config: MBartConfig
embed_tokens (torch.nn.Embedding): output embedding
"""
def __init__(self, config: MBartConfig, embed_tokens: Optional[nn.Embedding] = None):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
embed_dim = config.d_model
self.padding_idx = config.pad_token_id
self.max_source_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
if embed_tokens is not None:
self.embed_tokens = embed_tokens
else:
self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
## Modified by Raj Dabre. Start.
if config.features_vocab_sizes is not None: ### Set up embedders for features
self.features_embed_tokens = [nn.Embedding(feature_vocab_size, feature_embed_dim, self.padding_idx) for feature_vocab_size, feature_embed_dim in zip(config.features_vocab_sizes, config.features_embed_dims)]
self.features_final_project = nn.Linear(embed_dim+sum(config.features_embed_dims), embed_dim, bias=False)
else:
self.features_embed_tokens = None
self.features_final_project = None
## Modified by Raj Dabre. End.
if config.no_positional_encoding_encoder:
print("Using no positional encodings for encoder")
self.embed_positions = 0
else:
if config.positional_encodings:
print("Using positional encodings")
self.embed_positions = MBartSinusoidalPositionalEmbedding(
config.max_position_embeddings,
embed_dim,
self.padding_idx,
)
else:
print("Using positional embeddings")
self.embed_positions = MBartLearnedPositionalEmbedding(
config.max_position_embeddings,
embed_dim,
self.padding_idx,
)
## Modified by Raj Dabre. Start.
if config.encoder_tying_config is not None: ## Create unique or shared layers as per sharing configuration.
layer_idxs = config.encoder_tying_config.strip().split("-")
unique_idxs = sorted(set(layer_idxs))
self.unique_layers = nn.ModuleList([MBartEncoderLayer(config) for idx in unique_idxs])
self.layers = [self.unique_layers[int(idx)-1] for idx in layer_idxs]
else:
self.layers = nn.ModuleList([MBartEncoderLayer(config) for _ in range(config.encoder_layers)])
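        # Illustrative example (added for clarity only): encoder_tying_config = "1-1-2-2-3-3" creates 3 unique
        # MBartEncoderLayer modules and reuses them as the layer stack [1, 1, 2, 2, 3, 3], i.e. parameters are
        # shared across the 6 encoder positions.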
if config.multi_source and (config.multi_source_method == "self_relevance" or config.multi_source_method == "self_relevance_and_merge_before_attention" or config.multi_source_method == "self_relevance_and_merge_after_attention" or config.multi_source_method == "self_relevance_and_merge_after_attention_with_context_relevance_only"): ## We should pass each input through a relevance mechanism which is sigmoid(Wx) where x is the representation of the input.
self.self_relevance_layer = nn.Linear(config.hidden_size, config.hidden_size, bias=False)
## Modified by Raj Dabre. End.
if not config.no_embed_norm:
self.layernorm_embedding = nn.LayerNorm(embed_dim)
if config.multi_source and (config.multi_source_method == "mid_fusion_merge_before_attention" or config.multi_source_method == "mid_fusion_merge_after_attention" or config.multi_source_method == "bottleneck_mid_fusion_merge_before_attention" or config.multi_source_method == "bottleneck_mid_fusion_merge_after_attention"):
pass
else:
self.layer_norm = nn.LayerNorm(config.d_model)
self.init_weights()
def forward(
self,
input_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
features_ids=None, ### A tuple or list of feature ids. Each should have the same dimension as input_ids
        additional_input_ids=None, ## Placeholder argument. Won't be used.
        additional_input_ids_mask=None, ## Placeholder argument. Won't be used.
        prompt_params=None, ## Prompts to be prepended to the encoder outputs.
        adaptor_layers=None, ## Adaptor layers to be used in the encoder.
deep_adaptor_tuning=False, ## Whether to use deep adaptor tuning or not.
deep_adaptor_tuning_ffn_only=False, ## Whether to use deep adaptor tuning only after ffn or not.
parallel_adaptors=False, ## Whether to use parallel adaptors or not.
):
r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using :class:`~transformers.MBartTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert :obj:`input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
input_shape = inputs_embeds.size()[:-1]
if prompt_params is not None:
prompt_shape = prompt_params[0][0].size()[:-1]
for prompt_param_idx in range(len(prompt_params[0])):
prompt_params[0][prompt_param_idx] = prompt_params[0][prompt_param_idx] * self.embed_scale
prompt_params[0][prompt_param_idx] = prompt_params[0][prompt_param_idx].repeat(input_shape[0], 1, 1)
prompt_params[1][prompt_param_idx] = prompt_params[1][prompt_param_idx] * self.embed_scale
prompt_params[1][prompt_param_idx] = prompt_params[1][prompt_param_idx].repeat(input_shape[0], 1, 1)
## Modified by Raj Dabre. Start.
if self.features_final_project is not None and self.features_embed_tokens is not None: ## Perform feature computation and concatenation and projection.
            features_embeds = [feature_embed_tokens(feature_input_id) for feature_embed_tokens, feature_input_id in zip(self.features_embed_tokens, features_ids)]
            all_embeds = [inputs_embeds] + features_embeds
            inputs_embeds = self.features_final_project(torch.cat(all_embeds, dim=-1)) ## Basic feature-based model. Add relevance model here.
## Modified by Raj Dabre. End.
if self.config.no_positional_encoding_encoder:
embed_pos = self.embed_positions
if prompt_params is not None:
prompt_pos = self.embed_positions
else:
if prompt_params is not None:
prompt_pos = self.embed_positions(prompt_shape, 0)
embed_pos = self.embed_positions(input_shape, prompt_shape[1])
else:
embed_pos = self.embed_positions(input_shape)
hidden_states = inputs_embeds + embed_pos
if prompt_params is not None:
for prompt_param_idx in range(len(prompt_params[0])):
prompt_params[0][prompt_param_idx] = prompt_params[0][prompt_param_idx] + prompt_pos
prompt_params[1][prompt_param_idx] = prompt_params[1][prompt_param_idx] + prompt_pos
if not self.config.no_embed_norm:
hidden_states = self.layernorm_embedding(hidden_states)
if prompt_params is not None:
for prompt_param_idx in range(len(prompt_params[0])):
prompt_params[0][prompt_param_idx] = self.layernorm_embedding(prompt_params[0][prompt_param_idx])
prompt_params[1][prompt_param_idx] = self.layernorm_embedding(prompt_params[1][prompt_param_idx])
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
if prompt_params is not None:
for prompt_param_idx in range(len(prompt_params[0])):
prompt_params[0][prompt_param_idx] = F.dropout(prompt_params[0][prompt_param_idx], p=self.dropout, training=self.training)
prompt_params[1][prompt_param_idx] = F.dropout(prompt_params[1][prompt_param_idx], p=self.dropout, training=self.training)
# hidden_states = torch.cat([prompt_params[0], hidden_states], dim=1)
## Modified by Raj Dabre. Start.
# expand attention_mask
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
input_shape = inputs_embeds.size()[:-1]
attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[1] if prompt_params is not None else None, wait_k=1 if self.config.wait_k!=-1 or self.config.unidirectional_encoder else -1) ## Raj: Just make the mask wait-k with a k=1 and we are good to go. We want to have a unidirectional encoder no matter what.
## Modified by Raj Dabre. End.
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
moe_losses = () if self.config.use_moe else None
# check if head_mask has a correct number of layers specified if desired
if head_mask is not None:
assert head_mask.size()[0] == (
len(self.layers)
), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop): # skip the layer
layer_outputs = (None, None)
else:
if getattr(self.config, "gradient_checkpointing", False) and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(encoder_layer),
hidden_states,
attention_mask,
(head_mask[idx] if head_mask is not None else None),
)
else:
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
output_attentions=output_attentions,
prompt_params=prompt_params,
adaptor_layers=adaptor_layers,
deep_adaptor_tuning=deep_adaptor_tuning,
deep_adaptor_tuning_ffn_only=deep_adaptor_tuning_ffn_only,
parallel_adaptors=parallel_adaptors,
adaptor_or_prompt_layer_idx=idx,
)
if self.config.use_moe:
hidden_states, moe_loss = layer_outputs[0]
moe_losses += (moe_loss,)
else:
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
            ### If prompts are used, we keep the original prompt embeddings instead of their layer-updated representations.
# if prompt_params is not None:
# hidden_states = torch.cat([prompt_params[idx+1], hidden_states[:,prompt_shape[1]:,:]], dim=1)
## Modified by Raj Dabre. Start.
if self.config.multi_source and (self.config.multi_source_method == "self_relevance_and_merge_after_attention" or self.config.multi_source_method == "self_relevance_and_merge_before_attention" or self.config.multi_source_method == "self_relevance_and_merge_after_attention_with_context_relevance_only"):
hidden_states = hidden_states*torch.sigmoid(self.self_relevance_layer(hidden_states)) # Do self relevance as usual.
## Modified by Raj Dabre. End.
if adaptor_layers is not None and not deep_adaptor_tuning and not deep_adaptor_tuning_ffn_only: ## Apply adaptor layer for final encoder layer.
hidden_states = adaptor_layers(hidden_states, True)
if self.config.multi_source and (self.config.multi_source_method == "mid_fusion_merge_before_attention" or self.config.multi_source_method == "bottleneck_mid_fusion_merge_before_attention" or self.config.multi_source_method == "mid_fusion_merge_after_attention" or self.config.multi_source_method == "bottleneck_mid_fusion_merge_after_attention"): # No layer norm because the fusion layers have not been processed yet.
pass
else:
hidden_states = self.layer_norm(hidden_states)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_states, all_attentions, moe_losses] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions, moe_losses=moe_losses
)
class MBartDecoder(MBartPreTrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a :class:`MBartDecoderLayer`
Args:
config: MBartConfig
embed_tokens (torch.nn.Embedding): output embedding
"""
def __init__(self, config: MBartConfig, embed_tokens: Optional[nn.Embedding] = None):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.padding_idx = config.pad_token_id
self.max_target_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
if embed_tokens is not None:
self.embed_tokens = embed_tokens
else:
self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
if config.no_positional_encoding_decoder:
print("Using no positional encodings for decoder")
self.embed_positions = 0
else:
if config.positional_encodings:
print("Using positional encodings")
self.embed_positions = MBartSinusoidalPositionalEmbedding(
config.max_position_embeddings,
config.d_model,
self.padding_idx,
)
else:
print("Using positional embeddings")
self.embed_positions = MBartLearnedPositionalEmbedding(
config.max_position_embeddings,
config.d_model,
self.padding_idx,
)
## Modified by Raj Dabre. Start.
if config.decoder_tying_config is not None: ## Create unique or shared layers as per sharing configuration.
layer_idxs = config.decoder_tying_config.strip().split("-")
unique_idxs = sorted(set(layer_idxs))
self.unique_layers = nn.ModuleList([MBartDecoderLayer(config) for idx in unique_idxs])
self.layers = [self.unique_layers[int(idx)-1] for idx in layer_idxs]
else:
self.layers = nn.ModuleList([MBartDecoderLayer(config) for _ in range(config.decoder_layers)])
## Modified by Raj Dabre. End.
if not config.no_embed_norm:
self.layernorm_embedding = nn.LayerNorm(config.d_model)
self.layer_norm = nn.LayerNorm(config.d_model)
self.init_weights()
def get_input_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, value):
self.embed_tokens = value
# Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): # prompting=False
# create causal mask
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
combined_attention_mask = None
if input_shape[-1] > 1:
combined_attention_mask = _make_causal_mask(
input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length
).to(self.device)
# if prompting:
# bsz, _, tgt_seq_len, src_seq_len = combined_attention_mask.size()
# combined_attention_mask = torch.cat([combined_attention_mask[:,:,0:1,:].expand(bsz, 1, past_key_values_length, src_seq_len), combined_attention_mask], dim=2)
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
combined_attention_mask = (
expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
)
return combined_attention_mask
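# Illustrative note (a sketch, not executed here): for input_shape = (bsz, tgt_len) with
# past_key_values_length = p, the causal mask is an additive (bsz, 1, tgt_len, p + tgt_len)
# tensor (0 where attention is allowed, a large negative value where it is not), and the
# expanded padding mask is added on top, so a position is visible only if it is both
# causal-visible and non-padded. The shapes follow the usual MBart conventions and are
# stated as an assumption rather than re-derived from _make_causal_mask/_expand_mask.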
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
head_mask=None,
encoder_head_mask=None,
past_key_values=None,
inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
additional_encoder_hidden_states=None,
additional_encoder_attention_mask=None,
curr_decode_length=-1,
prompt_params=None, ## Prompts to be prepended to the decoder outputs.
adaptor_layers=None, ## Adaptor layers to be used in the decoder.
deep_adaptor_tuning=False, ## Whether to use deep adaptor tuning.
deep_adaptor_tuning_ffn_only=False, ## Whether to use deep adaptor tuning after ffn only.
parallel_adaptors=False, ## Whether to use parallel adaptors.
):
r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using :class:`~transformers.MBartTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, encoder_sequence_length)`, `optional`):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention
on hidden heads. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]` of length :obj:`config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last
:obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of
shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size,
sequence_length)`.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert :obj:`input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
if prompt_params is not None: ## During training past_key_values_length is always 0, so it must be increased by the prompt length to build a proper causal decoder mask. The input embeddings also need to be augmented with the prompt info; during evaluation this only matters for the first generation step.
prompt_shape = prompt_params[0][0].size()[:-1]
past_key_values_length += prompt_shape[1]
batch_dims = inputs_embeds.size()
for prompt_params_idx in range(len(prompt_params[0])):
prompt_params[0][prompt_params_idx] = prompt_params[0][prompt_params_idx] * self.embed_scale
prompt_params[0][prompt_params_idx] = prompt_params[0][prompt_params_idx].repeat(batch_dims[0], 1, 1)# Repeat the embeddings for each batch
prompt_params[1][prompt_params_idx] = prompt_params[1][prompt_params_idx] * self.embed_scale
prompt_params[1][prompt_params_idx] = prompt_params[1][prompt_params_idx].repeat(batch_dims[0], 1, 1)# Repeat the embeddings for each batch
prompt_params[2][prompt_params_idx] = prompt_params[2][prompt_params_idx] * self.embed_scale
prompt_params[2][prompt_params_idx] = prompt_params[2][prompt_params_idx].repeat(batch_dims[0], 1, 1)# Repeat the embeddings for each batch
prompt_params[3][prompt_params_idx] = prompt_params[3][prompt_params_idx] * self.embed_scale
prompt_params[3][prompt_params_idx] = prompt_params[3][prompt_params_idx].repeat(batch_dims[0], 1, 1)# Repeat the embeddings for each batch
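# Note (inferred from MBartModel.forward, which passes [key_sa, value_sa, key_xa, value_xa] here):
# prompt_params[0]/[1] are the per-layer self-attention key/value prompts and prompt_params[2]/[3]
# are the cross-attention key/value prompts; all four lists are scaled and batch-expanded
# identically above.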
attention_mask = self._prepare_decoder_attention_mask(
attention_mask, input_shape, inputs_embeds, past_key_values_length
) ## Will be none if not training.
## Modified by Raj Dabre. Start.
# expand encoder attention mask
if encoder_hidden_states is not None and encoder_attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1], wait_k=self.config.wait_k, curr_decode_length=curr_decode_length) ## Raj: Just make the mask wait-k and we are good to go. We won't deal with wait-k and prompts at the moment since it gets a bit tricky. TODO: Make prompts and wait-k work together. # +(prompt_shape[1] if prompt_params is not None and (self.training or curr_decode_length == 1) else 0)
if self.config.multi_source:
if additional_encoder_hidden_states is not None and additional_encoder_attention_mask is not None:
additional_encoder_attention_mask = _expand_mask(additional_encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1], wait_k=self.config.additional_source_wait_k, curr_decode_length=curr_decode_length) ## Raj: Just make the mask wait-k and we are good to go.
# embed positions
#print(encoder_attention_mask.size() if encoder_attention_mask is not None else 1, additional_encoder_attention_mask.size() if additional_encoder_attention_mask is not None else 1)
## Modified by Raj Dabre. End.
if self.config.no_positional_encoding_decoder:
positions = self.embed_positions
if prompt_params is not None:
prompt_positions = self.embed_positions
else:
if prompt_params is not None:
prompt_positions = self.embed_positions(prompt_shape, 0)
positions = self.embed_positions(inputs_embeds.size(), past_key_values_length) ## No matter what, the past key values length will be properly updated.
hidden_states = inputs_embeds + positions
if prompt_params is not None:
for prompt_params_idx in range(len(prompt_params[0])):
prompt_params[0][prompt_params_idx] = prompt_params[0][prompt_params_idx] + prompt_positions
prompt_params[1][prompt_params_idx] = prompt_params[1][prompt_params_idx] + prompt_positions
prompt_params[2][prompt_params_idx] = prompt_params[2][prompt_params_idx] + prompt_positions
prompt_params[3][prompt_params_idx] = prompt_params[3][prompt_params_idx] + prompt_positions
if not self.config.no_embed_norm:
hidden_states = self.layernorm_embedding(hidden_states)
if prompt_params is not None:
for prompt_params_idx in range(len(prompt_params[0])):
prompt_params[0][prompt_params_idx] = self.layernorm_embedding(prompt_params[0][prompt_params_idx])
prompt_params[1][prompt_params_idx] = self.layernorm_embedding(prompt_params[1][prompt_params_idx])
prompt_params[2][prompt_params_idx] = self.layernorm_embedding(prompt_params[2][prompt_params_idx])
prompt_params[3][prompt_params_idx] = self.layernorm_embedding(prompt_params[3][prompt_params_idx])
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
if prompt_params is not None:
for prompt_params_idx in range(len(prompt_params[0])):
prompt_params[0][prompt_params_idx] = F.dropout(prompt_params[0][prompt_params_idx], p=self.dropout, training=self.training)
prompt_params[1][prompt_params_idx] = F.dropout(prompt_params[1][prompt_params_idx], p=self.dropout, training=self.training)
prompt_params[2][prompt_params_idx] = F.dropout(prompt_params[2][prompt_params_idx], p=self.dropout, training=self.training)
prompt_params[3][prompt_params_idx] = F.dropout(prompt_params[3][prompt_params_idx], p=self.dropout, training=self.training)
# hidden_states = torch.cat([prompt_params[0], hidden_states], dim=1)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
moe_losses = () if self.config.use_moe else None
## Modified by Raj Dabre. Start.
additional_all_cross_attentions = () if self.config.multi_source and (self.config.multi_source_method == "merge_after_attention" or self.config.multi_source_method == "self_relevance_and_merge_after_attention" or self.config.multi_source_method == "merge_after_attention_with_context_relevance_only" or self.config.multi_source_method == "self_relevance_and_merge_after_attention_with_context_relevance_only" or self.config.multi_source_method == "mid_fusion_merge_after_attention" or self.config.multi_source_method == "bottleneck_mid_fusion_merge_after_attention") and output_attentions and additional_encoder_hidden_states is not None else None
next_decoder_cache = () if use_cache else None
if self.config.multi_source and (self.config.multi_source_method == "merge_before_attention" or self.config.multi_source_method == "self_relevance_and_merge_before_attention" or self.config.multi_source_method == "mid_fusion_merge_before_attention" or self.config.multi_source_method == "bottleneck_mid_fusion_merge_before_attention" ):
encoder_hidden_states = torch.cat([encoder_hidden_states, additional_encoder_hidden_states], 1) ## Concatenate sequences blindly along the sequence axis.
encoder_attention_mask = torch.cat([encoder_attention_mask, additional_encoder_attention_mask], -1) ## Concatenate along the src_seq_len axis.
#print(encoder_hidden_states.size(), encoder_attention_mask.size())
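# Shape sketch (assuming both masks are additive [bsz, 1, tgt_len, src_len] masks):
# encoder_hidden_states becomes (bsz, src_len + additional_src_len, d_model) and
# encoder_attention_mask becomes (bsz, 1, tgt_len, src_len + additional_src_len),
# so the decoder's single cross-attention simply sees one longer "source" sequence.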
## Modified by Raj Dabre. End.
# check if head_mask has a correct number of layers specified if desired
if head_mask is not None:
assert head_mask.size()[0] == (
len(self.layers)
), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
if output_hidden_states:
all_hidden_states += (hidden_states,)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop):
continue
past_key_value = past_key_values[idx] if past_key_values is not None else None
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
logger.warn(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
# None for past_key_value
return module(*inputs, output_attentions, use_cache)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(decoder_layer),
hidden_states,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
head_mask[idx] if head_mask is not None else None,
encoder_head_mask[idx] if encoder_head_mask is not None else None,
None,
)
else:
layer_outputs = decoder_layer(
hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
encoder_layer_head_mask=(encoder_head_mask[idx] if encoder_head_mask is not None else None),
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
additional_encoder_hidden_states=additional_encoder_hidden_states,
additional_encoder_attention_mask=additional_encoder_attention_mask,
prompt_params=prompt_params,
adaptor_layers=adaptor_layers,
deep_adaptor_tuning=deep_adaptor_tuning,
deep_adaptor_tuning_ffn_only=deep_adaptor_tuning_ffn_only,
parallel_adaptors=parallel_adaptors,
adaptor_or_prompt_layer_idx=idx,
)
if self.config.use_moe:
hidden_states, moe_loss = layer_outputs[0]
moe_losses += (moe_loss,)
else:
hidden_states = layer_outputs[0]
# If prompts are used, we keep the original prompt embeddings rather than their layer-updated representations.
# if prompt_params is not None and (self.training or curr_decode_length == 1):
# hidden_states = torch.cat([prompt_params[idx+1], hidden_states[:, prompt_shape[1]:, :]], dim=1)
## Modified by Raj Dabre. Start.
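# Cache index note (inferred from the indices used below and at the attention collection step):
# with output_attentions, a layer returns (hidden, self_attn, cross_attn, present) in the standard
# case, but (hidden, self_attn, cross_attn, additional_cross_attn, present) when a second source
# is merged after attention, so the present key/values sit at index 4 instead of 3; without
# output_attentions they are always at index 1.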
if use_cache:
if self.config.multi_source and (self.config.multi_source_method == "merge_after_attention" or self.config.multi_source_method == "self_relevance_and_merge_after_attention" or self.config.multi_source_method == "merge_after_attention_with_context_relevance_only" or self.config.multi_source_method == "self_relevance_and_merge_after_attention_with_context_relevance_only" or self.config.multi_source_method == "mid_fusion_merge_after_attention" or self.config.multi_source_method == "bottleneck_mid_fusion_merge_after_attention"):
next_decoder_cache += (layer_outputs[4 if output_attentions else 1],)
else:
next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
## Modified by Raj Dabre. End.
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
## Modified by Raj Dabre. Start.
if self.config.multi_source and (self.config.multi_source_method == "merge_after_attention" or self.config.multi_source_method == "self_relevance_and_merge_after_attention" or self.config.multi_source_method == "merge_after_attention_with_context_relevance_only" or self.config.multi_source_method == "self_relevance_and_merge_after_attention_with_context_relevance_only" or self.config.multi_source_method == "mid_fusion_merge_after_attention" or self.config.multi_source_method == "bottleneck_mid_fusion_merge_after_attention"):
additional_all_cross_attentions += (layer_outputs[3],)
## Modified by Raj Dabre. End.
if adaptor_layers is not None and not deep_adaptor_tuning and not deep_adaptor_tuning_ffn_only: ## Apply adaptor layer for final decoder output only.
hidden_states = adaptor_layers(hidden_states, False)
hidden_states = self.layer_norm(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
next_cache = next_decoder_cache if use_cache else None
## Modified by Raj Dabre. Start.
if not return_dict:
if self.config.multi_source and (self.config.multi_source_method == "merge_after_attention" or self.config.multi_source_method == "self_relevance_and_merge_after_attention" or self.config.multi_source_method == "merge_after_attention_with_context_relevance_only" or self.config.multi_source_method == "self_relevance_and_merge_after_attention_with_context_relevance_only" or self.config.multi_source_method == "mid_fusion_merge_after_attention" or self.config.multi_source_method == "bottleneck_mid_fusion_merge_after_attention"):
return tuple(
v
for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions, additional_all_cross_attentions, moe_losses]
if v is not None
)
else:
return tuple(
v
for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions, moe_losses]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_cache,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attentions,
additional_cross_attentions=additional_all_cross_attentions,
moe_losses = moe_losses,
)
## Modified by Raj Dabre. End.
@add_start_docstrings(
"The bare MBART Model outputting raw hidden-states without any specific head on top.",
MBART_START_DOCSTRING,
)
class MBartModel(MBartPreTrainedModel):
def __init__(self, config: MBartConfig):
super().__init__(config)
padding_idx, vocab_size = config.pad_token_id, config.vocab_size
self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
self.encoder = MBartEncoder(config, self.shared)
self.decoder = MBartDecoder(config, self.shared)
## Modified by Raj Dabre. Start.
if self.config.multi_source and config.multi_source_method == "additional_source_attention":
self.context_attention = MBartDecoderLayer(config)
self.context_norm = nn.LayerNorm(config.d_model)
if self.config.multi_source and (config.multi_source_method == "mid_fusion_merge_before_attention" or config.multi_source_method == "bottleneck_mid_fusion_merge_before_attention" or config.multi_source_method == "mid_fusion_merge_after_attention" or config.multi_source_method == "bottleneck_mid_fusion_merge_after_attention"):
self.mid_fusion_layers = nn.ModuleList([MBartEncoderLayer(config) for _ in range(config.mid_fusion_layers)])
self.mid_fusion_norm = nn.LayerNorm(config.d_model)
if config.multi_source_method == "bottleneck_mid_fusion_merge_before_attention" or config.multi_source_method == "bottleneck_mid_fusion_merge_after_attention":
bottleneck_params = torch.zeros(1, config.bottleneck_mid_fusion_tokens, config.d_model)
bottleneck_params.normal_(mean=0.0, std=config.init_std)
self.bottleneck_params = torch.nn.Parameter(bottleneck_params)
## Modified by Raj Dabre. End.
self.init_weights()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, value):
self.shared = value
self.encoder.embed_tokens = self.shared
self.decoder.embed_tokens = self.shared
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
@add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="facebook/mbart-large-cc25",
output_type=Seq2SeqModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
encoder_outputs=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
additional_input_ids=None,
additional_input_ids_mask=None,
additional_encoder_outputs=None,
context_encoder_representations=None,
curr_decode_length=-1,
prompt_params=None,
adaptor_layers=None,
deep_adaptor_tuning=False,
deep_adaptor_tuning_ffn_only=False,
parallel_adaptors=False,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# different to other models, MBart automatically creates decoder_input_ids from
# input_ids if no decoder_input_ids are provided
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(input_ids, self.config.pad_token_id)
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
prompt_params=[prompt_params[0], prompt_params[1]] if prompt_params is not None else None,
adaptor_layers=adaptor_layers,
deep_adaptor_tuning=deep_adaptor_tuning,
deep_adaptor_tuning_ffn_only=deep_adaptor_tuning_ffn_only,
parallel_adaptors=parallel_adaptors,
)
# If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
## Modified by Raj Dabre. Start.
if self.config.multi_source:
if additional_encoder_outputs is None:
main_source_wait_k = self.config.wait_k
self.config.wait_k = self.config.additional_source_wait_k
additional_encoder_outputs = self.encoder(
input_ids=additional_input_ids,
attention_mask=additional_input_ids_mask,
head_mask=head_mask, ## Should be None. Don't mess with this.
inputs_embeds=inputs_embeds, ## Should be None. Don't mess with this.
output_attentions=output_attentions, ## Should be False. Don't mess with this.
output_hidden_states=output_hidden_states, ## Should be False. Don't mess with this.
return_dict=return_dict,
)
if self.config.use_moe: ## Add the additional encoder MOE losses to the main encoder.
encoder_outputs[3] = encoder_outputs[3] + additional_encoder_outputs[3]
self.config.wait_k = main_source_wait_k
# If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
elif return_dict and not isinstance(additional_encoder_outputs, BaseModelOutput):
additional_encoder_outputs = BaseModelOutput(
last_hidden_state=additional_encoder_outputs[0],
hidden_states=additional_encoder_outputs[1] if len(additional_encoder_outputs) > 1 else None,
attentions=additional_encoder_outputs[2] if len(additional_encoder_outputs) > 2 else None,
) ## Figure out a way to return this
else:
additional_encoder_outputs = [None]
if self.config.multi_source and (self.config.multi_source_method == "mid_fusion_merge_before_attention" or self.config.multi_source_method == "bottleneck_mid_fusion_merge_before_attention" or self.config.multi_source_method == "bottleneck_mid_fusion_merge_after_attention" or self.config.multi_source_method == "mid_fusion_merge_after_attention"):
# Concatenate the encoder and additional encoder outputs, or concatenate the bottleneck params with each of them.
# Use the dedicated mid-fusion encoder layers for the further processing.
# Do the processing, handle MoE losses, and update the hidden states after splitting them again at each stage.
# The mid_fusion_layers hyperparameter gives the number of fusion layers; recurrent stacking still needs handling, and fusion layers plus regular encoder layers sum to the actual total.
# Layer norm is disabled in the main encoder code when this type of fusion is done (mid_fusion_norm is applied below instead).
if context_encoder_representations is None:
hidden_states = encoder_outputs[0]
additional_hidden_states = additional_encoder_outputs[0]
encoder_input_length = hidden_states.size()[1]
additional_encoder_input_length = additional_hidden_states.size()[1]
encoder_self_attention_mask = _expand_mask(attention_mask, hidden_states.dtype, wait_k=self.config.wait_k)
additional_encoder_self_attention_mask = _expand_mask(additional_input_ids_mask, additional_hidden_states.dtype, wait_k=self.config.additional_source_wait_k)
if self.config.multi_source_method == "mid_fusion_merge_before_attention" or self.config.multi_source_method == "mid_fusion_merge_after_attention":
# Concatenate the encoder and additional encoder outputs
# We have to deal with creation of attention masks for encoder to itself, additional encoder to itself and then cross between these two.
encoder_to_additional_encoder_self_attention_mask = _expand_mask(additional_input_ids_mask, additional_hidden_states.dtype, tgt_len=encoder_input_length, wait_k=self.config.wait_k)
additional_encoder_to_encoder_self_attention_mask = _expand_mask(attention_mask, hidden_states.dtype, tgt_len=additional_encoder_input_length, wait_k=self.config.additional_source_wait_k)
combined_mask_a = torch.cat([encoder_self_attention_mask, encoder_to_additional_encoder_self_attention_mask], dim=3)
combined_mask_b = torch.cat([additional_encoder_to_encoder_self_attention_mask, additional_encoder_self_attention_mask], dim=3)
combined_mask = torch.cat((combined_mask_a, combined_mask_b), dim=2)
combined_encoder_outputs = torch.cat((hidden_states, additional_hidden_states), dim=1)
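# Layout of combined_mask (sketch): a block matrix over the concatenated sequence,
# [ enc->enc                 enc->additional_enc        ]
# [ additional_enc->enc      additional_enc->additional ]
# so every fusion layer lets each source attend both to itself and to the other source,
# with each block still respecting its own wait-k/padding constraints.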
for idx, fusion_layer in enumerate(self.mid_fusion_layers):
if output_hidden_states:
encoder_outputs[1] = encoder_outputs[1] + (hidden_states,)
additional_encoder_outputs[1] = additional_encoder_outputs[1] + (additional_hidden_states,)
layer_outputs = fusion_layer(
combined_encoder_outputs,
combined_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
output_attentions=output_attentions,
adaptor_layers=adaptor_layers,
deep_adaptor_tuning=deep_adaptor_tuning,
deep_adaptor_tuning_ffn_only=deep_adaptor_tuning_ffn_only,
parallel_adaptors=parallel_adaptors,
adaptor_or_prompt_layer_idx=idx+self.config.encoder_layers,
)
if self.config.use_moe:
hidden_states, moe_loss = layer_outputs[0]
encoder_outputs[3] = encoder_outputs[3] + moe_loss
else:
hidden_states = layer_outputs[0]
combined_encoder_outputs = hidden_states
# Split hidden states and update the hidden states
hidden_states, additional_hidden_states = torch.split(hidden_states, (encoder_input_length, additional_encoder_input_length), dim=1)
if output_attentions:
current_attentions = layer_outputs[1]
# Split the attentions and update the attentions
current_attentions, additional_current_attentions = torch.split(current_attentions, (encoder_input_length, additional_encoder_input_length), dim=2) ## TODO: only the rows are split here; the column dimension is still (encoder_input_length+additional_encoder_input_length), which may or may not matter downstream.
encoder_outputs[2] = encoder_outputs[2] + (current_attentions,)
additional_encoder_outputs[2] = additional_encoder_outputs[2] + (additional_current_attentions,)
elif self.config.multi_source_method == "bottleneck_mid_fusion_merge_before_attention" or self.config.multi_source_method == "bottleneck_mid_fusion_merge_after_attention":
batch_size = hidden_states.size()[0]
# Expand the bottleneck params to batch size
bottleneck_params = self.bottleneck_params.expand(batch_size, -1, -1)
# Concatenate the bottleneck params with the encoder and additional encoder outputs individually
combined_hidden_states = torch.cat((bottleneck_params, hidden_states), dim=1)
combined_additional_hidden_states = torch.cat((bottleneck_params, additional_hidden_states), dim=1)
# Create a ones mask of shape (batch_size, 1, encoder_input_length, bottleneck_mid_fusion_tokens)
ones_mask = torch.ones(batch_size, 1, encoder_input_length, self.config.bottleneck_mid_fusion_tokens).to(hidden_states.device)
additional_ones_mask = torch.ones(batch_size, 1, additional_encoder_input_length, self.config.bottleneck_mid_fusion_tokens).to(hidden_states.device)
# Expand the masks to accommodate the bottleneck params. We replicate the first row bottleneck_mid_fusion_tokens times; this is fine since the bottleneck params can attend to themselves and should attend to the first token.
encoder_self_attention_mask = torch.cat((ones_mask, encoder_self_attention_mask), dim=3)
encoder_self_attention_mask = torch.cat([encoder_self_attention_mask[:,:,0:1,:].expand(batch_size, 1, self.config.bottleneck_mid_fusion_tokens, self.config.bottleneck_mid_fusion_tokens+encoder_input_length), encoder_self_attention_mask], dim=2)
additional_encoder_self_attention_mask = torch.cat((additional_ones_mask, additional_encoder_self_attention_mask), dim=3)
additional_encoder_self_attention_mask = torch.cat([additional_encoder_self_attention_mask[:,:,0:1,:].expand(batch_size, 1, self.config.bottleneck_mid_fusion_tokens, self.config.bottleneck_mid_fusion_tokens+additional_encoder_input_length), additional_encoder_self_attention_mask], dim=2)
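# Bottleneck fusion note: the two sources never attend to each other directly; each source
# attends to (bottleneck tokens + itself), and after every fusion layer the two updated copies
# of the bottleneck tokens are averaged below, so the bottleneck is the only channel through
# which information flows between the sources.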
for idx, fusion_layer in enumerate(self.mid_fusion_layers):
if output_hidden_states:
encoder_outputs[1] = encoder_outputs[1] + (hidden_states,)
additional_encoder_outputs[1] = additional_encoder_outputs[1] + (additional_hidden_states,)
layer_outputs = fusion_layer(
combined_hidden_states,
encoder_self_attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
output_attentions=output_attentions,
adaptor_layers=adaptor_layers,
deep_adaptor_tuning=deep_adaptor_tuning,
deep_adaptor_tuning_ffn_only=deep_adaptor_tuning_ffn_only,
parallel_adaptors=parallel_adaptors,
adaptor_or_prompt_layer_idx=idx+self.config.encoder_layers,
)
additional_layer_outputs = fusion_layer(
combined_additional_hidden_states,
additional_encoder_self_attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
output_attentions=output_attentions,
adaptor_layers=adaptor_layers,
deep_adaptor_tuning=deep_adaptor_tuning,
deep_adaptor_tuning_ffn_only=deep_adaptor_tuning_ffn_only,
parallel_adaptors=parallel_adaptors,
adaptor_or_prompt_layer_idx=idx+self.config.encoder_layers,
)
if self.config.use_moe:
combined_hidden_states, moe_loss = layer_outputs[0]
combined_additional_hidden_states, additional_moe_loss = additional_layer_outputs[0]
encoder_outputs[3] = encoder_outputs[3] + moe_loss + additional_moe_loss
else:
combined_hidden_states = layer_outputs[0]
combined_additional_hidden_states = additional_layer_outputs[0]
# Split the combined states back into the bottleneck part and the per-source hidden states
bottleneck_params, hidden_states = torch.split(combined_hidden_states, (self.config.bottleneck_mid_fusion_tokens, encoder_input_length), dim=1)
additional_bottleneck_params, additional_hidden_states = torch.split(combined_additional_hidden_states, (self.config.bottleneck_mid_fusion_tokens, additional_encoder_input_length), dim=1)
# Average the bottleneck params
bottleneck_params = (bottleneck_params + additional_bottleneck_params)/2
# Concatenate the bottleneck params with the encoder and additional encoder outputs individually
combined_hidden_states = torch.cat((bottleneck_params, hidden_states), dim=1)
combined_additional_hidden_states = torch.cat((bottleneck_params, additional_hidden_states), dim=1)
if output_attentions:
current_attentions = layer_outputs[1]
additional_current_attentions = additional_layer_outputs[1]
# Split the attentions and update the attentions
## TODO: only the rows are split below; the column dimension is still (self.config.bottleneck_mid_fusion_tokens+encoder_input_length) and (self.config.bottleneck_mid_fusion_tokens+additional_encoder_input_length) respectively, which may or may not matter downstream.
current_attentions = torch.split(current_attentions, (self.config.bottleneck_mid_fusion_tokens, encoder_input_length), dim=2)[1]
additional_current_attentions = torch.split(additional_current_attentions, (self.config.bottleneck_mid_fusion_tokens, additional_encoder_input_length), dim=2)[1]
encoder_outputs[2] = encoder_outputs[2] + (current_attentions,)
additional_encoder_outputs[2] = additional_encoder_outputs[2] + (additional_current_attentions,)
# Apply the layer normalization
hidden_states = self.mid_fusion_norm(hidden_states)
additional_hidden_states = self.mid_fusion_norm(additional_hidden_states)
# Update the hidden states
encoder_outputs["last_hidden_state"] = hidden_states
additional_encoder_outputs["last_hidden_state"] = additional_hidden_states
context_encoder_representations = torch.cat((hidden_states, additional_hidden_states), dim=1) ## We use this as a placeholder to prevent any additional computations :)
if self.config.multi_source and self.config.multi_source_method == "additional_source_attention": ## We do a "cross attention" between the sentence and its context. For now this will be recomputed for each decoding time step.
if context_encoder_representations is None:
encoder_input_length = encoder_outputs[0].size()[1]
additional_encoder_input_length = additional_encoder_outputs[0].size()[1]
encoder_self_attention_mask = _expand_mask(attention_mask, encoder_outputs[0].dtype, wait_k=self.config.additional_source_wait_k)
encoder_encoder_cross_attention_mask = _expand_mask(additional_input_ids_mask, encoder_outputs[0].dtype, tgt_len=encoder_input_length, wait_k=self.config.additional_source_wait_k)
context_encoder_representations = self.context_attention(encoder_outputs[0],
attention_mask=encoder_self_attention_mask,
encoder_hidden_states=additional_encoder_outputs[0],
encoder_attention_mask=encoder_encoder_cross_attention_mask,
layer_head_mask=None,
encoder_layer_head_mask=None,
past_key_value=None,
output_attentions=False,
use_cache=False,
additional_encoder_hidden_states=None,
additional_encoder_attention_mask=None,)
context_encoder_representations[0] = self.context_norm(context_encoder_representations[0])
#print(type(encoder_outputs), type(context_encoder_representations))
encoder_outputs["last_hidden_state"] = context_encoder_representations[0]
context_encoder_representations = context_encoder_representations[0]
# decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
encoder_hidden_states=encoder_outputs[0],
encoder_attention_mask=attention_mask,
head_mask=decoder_head_mask,
encoder_head_mask=head_mask,
past_key_values=past_key_values,
inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states or self.config.multilayer_softmaxing is not None, ## In case of multilayer softmaxing we need the hidden states ONLY FROM THE DECODER.
return_dict=return_dict,
additional_encoder_hidden_states=additional_encoder_outputs[0],
additional_encoder_attention_mask=additional_input_ids_mask,
curr_decode_length=curr_decode_length,
prompt_params=[prompt_params[2], prompt_params[3], prompt_params[4], prompt_params[5]] if prompt_params is not None else None,
adaptor_layers=adaptor_layers,
deep_adaptor_tuning=deep_adaptor_tuning,
deep_adaptor_tuning_ffn_only=deep_adaptor_tuning_ffn_only,
parallel_adaptors=parallel_adaptors,
)
# if prompt_params is not None and (self.training or curr_decode_length == 1):
# decoder_outputs.last_hidden_state = decoder_outputs.last_hidden_state[:,prompt_params[2][0].size()[1]:,:]
if not return_dict:
return decoder_outputs + encoder_outputs
return Seq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
additional_encoder_last_hidden_state=additional_encoder_outputs.last_hidden_state if self.config.multi_source else None,
additional_encoder_hidden_states=additional_encoder_outputs.hidden_states if self.config.multi_source else None,
additional_encoder_attentions=additional_encoder_outputs.attentions if self.config.multi_source else None,
additional_cross_attentions=decoder_outputs.additional_cross_attentions if self.config.multi_source and (self.config.multi_source_method == "merge_after_attention" or self.config.multi_source_method == "self_relevance_and_merge_after_attention" or self.config.multi_source_method == "merge_after_attention_with_context_relevance_only" or self.config.multi_source_method == "self_relevance_and_merge_after_attention_with_context_relevance_only" or self.config.multi_source_method == "mid_fusion_merge_after_attention" or self.config.multi_source_method == "bottleneck_mid_fusion_merge_after_attention") else (),
context_encoder_representations = context_encoder_representations if self.config.multi_source and (self.config.multi_source_method == "additional_source_attention" or self.config.multi_source_method == "mid_fusion_merge_before_attention" or self.config.multi_source_method == "bottleneck_mid_fusion_merge_before_attention" or self.config.multi_source_method == "bottleneck_mid_fusion_merge_after_attention" or self.config.multi_source_method == "mid_fusion_merge_after_attention") else None, ## Find a way to return all contents of context_encoder_representations in the future.
encoder_moe_losses = encoder_outputs.moe_losses,
decoder_moe_losses = decoder_outputs.moe_losses,
)
## Modified by Raj Dabre. End.
## Modified by Raj Dabre. Start.
class GradientReversalFunction(Function): ## Glory be to the gradients in reverse. AMEN!
"""
Gradient Reversal Layer from:
Unsupervised Domain Adaptation by Backpropagation (Ganin & Lempitsky, 2015)
Forward pass is the identity function. In the backward pass,
the upstream gradients are multiplied by -lambda (i.e. gradient is reversed)
"""
@staticmethod
def forward(ctx, x, lambda_):
ctx.lambda_ = lambda_
return x.clone()
@staticmethod
def backward(ctx, grads):
lambda_ = ctx.lambda_
lambda_ = grads.new_tensor(lambda_)
dx = -lambda_ * grads
return dx, None
class GradientReversal(nn.Module):
def __init__(self, lambda_=1):
super(GradientReversal, self).__init__()
self.lambda_ = lambda_
def forward(self, x):
return GradientReversalFunction.apply(x, self.lambda_)
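# Usage sketch (assumed wiring, mirroring how gradient_reversal_for_domain_classifier is set up
# further below): features pass through GradientReversal before the domain classifier head, e.g.
# domain_logits = domain_classifier_head(GradientReversal(lambda_=1.0)(hidden_states)),
# so the classifier trains normally while the upstream encoder receives reversed gradients and is
# pushed towards domain-invariant representations.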
class Prompts(nn.Module):
"""Custom Pytorch model for creating continuous prompts.
"""
def __init__(self, num_prompts, d_model, init_std):
super().__init__()
# initialize weights with random numbers
prompt_params = torch.zeros(1, num_prompts, d_model)
prompt_params.normal_(mean=0.0, std=init_std)
self.prompt_params = torch.nn.Parameter(prompt_params)
ffn1 = torch.zeros(d_model, d_model*4)
ffn1.normal_(mean=0.0, std=init_std)
self.ffn1 = torch.nn.Parameter(ffn1)
self.activation = torch.nn.GELU()
ffn2 = torch.zeros(d_model*4, d_model)
ffn2.normal_(mean=0.0, std=init_std)
self.ffn2 = torch.nn.Parameter(ffn2)
self.layer_norm = nn.LayerNorm(d_model)
def forward(self, dummy_arg):
return self.prompt_params + torch.matmul(self.activation(torch.matmul(self.layer_norm(self.prompt_params), self.ffn1)), self.ffn2)
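# Shape sketch (assuming num_prompts=N and d_model=D): Prompts(N, D, init_std)(0) returns a
# (1, N, D) tensor: the raw prompt embeddings plus a residual LayerNorm -> D*4 -> GELU -> D
# reparameterisation FFN. The dummy argument is unused and only keeps the call signature uniform.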
class EncoderDecoderPrompts(nn.Module):
"""Custom Pytorch model for creating continuous prompts.
"""
def __init__(self, num_prompts, encoder_layers, decoder_layers, d_model, init_std):
super().__init__()
# initialize weights with random numbers
self.encoder_prompts_key = torch.nn.ModuleList([Prompts(num_prompts, d_model, init_std) for _ in range(encoder_layers)])
self.decoder_prompts_key_sa = torch.nn.ModuleList([Prompts(num_prompts, d_model, init_std) for _ in range(decoder_layers)])
self.decoder_prompts_key_xa = torch.nn.ModuleList([Prompts(num_prompts, d_model, init_std) for _ in range(decoder_layers)])
self.encoder_prompts_value = torch.nn.ModuleList([Prompts(num_prompts, d_model, init_std) for _ in range(encoder_layers)])
self.decoder_prompts_value_sa = torch.nn.ModuleList([Prompts(num_prompts, d_model, init_std) for _ in range(decoder_layers)])
self.decoder_prompts_value_xa = torch.nn.ModuleList([Prompts(num_prompts, d_model, init_std) for _ in range(decoder_layers)])
print("Number of additional parameters during training are:", (encoder_layers*2)*(d_model*d_model*4*2+ num_prompts*d_model)+(decoder_layers*3)*(d_model*d_model*4*2+ num_prompts*d_model))
print("Number of additional parameters during evaluation are:", (encoder_layers*2)*(num_prompts*d_model)+(decoder_layers*3)*(num_prompts*d_model))
self.num_prompts = num_prompts
self.d_model = d_model
def forward(self, dummy_arg):
return [encoder_prompt(dummy_arg) for encoder_prompt in self.encoder_prompts_key], [encoder_prompt(dummy_arg) for encoder_prompt in self.encoder_prompts_value], [decoder_prompt(dummy_arg) for decoder_prompt in self.decoder_prompts_key_sa], [decoder_prompt(dummy_arg) for decoder_prompt in self.decoder_prompts_value_sa], [decoder_prompt(dummy_arg) for decoder_prompt in self.decoder_prompts_key_xa], [decoder_prompt(dummy_arg) for decoder_prompt in self.decoder_prompts_value_xa]
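# Return-order note: the six lists come back as (encoder key, encoder value, decoder self-attn key,
# decoder self-attn value, decoder cross-attn key, decoder cross-attn value), one reparameterised
# (1, num_prompts, d_model) tensor per layer; callers below slice indices [0:2] for the encoder
# and [2:6] for the decoder.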
class Adaptor(nn.Module):
"""Custom Pytorch model for adaptor FFNs. We will pass these to the model and optimize and save them separately.
"""
def __init__(self, d_model, hidden, init_std=0.02, hypercomplex=False, hypercomplex_n=2, layernorm_adaptor_input=False, adaptor_scaling_factor=1.0, residual_connection=False):
super().__init__()
# initialize weights with random numbers
if hypercomplex:
self.ffn1_a = torch.nn.ParameterList() ## ParameterList (not ModuleList) so the appended nn.Parameters are registered correctly.
self.ffn2_a = torch.nn.ParameterList()
self.ffn1_b = torch.nn.ParameterList()
self.ffn2_b = torch.nn.ParameterList()
for _ in range(hypercomplex_n):
ffn1_a = torch.zeros(hypercomplex_n, hypercomplex_n)
ffn1_a.normal_(mean=0.0, std=init_std)
self.ffn1_a.append(torch.nn.Parameter(ffn1_a))
ffn2_a = torch.zeros(hypercomplex_n, hypercomplex_n)
ffn2_a.normal_(mean=0.0, std=init_std)
self.ffn2_a.append(torch.nn.Parameter(ffn2_a))
ffn1_b = torch.zeros(d_model//hypercomplex_n, hidden//hypercomplex_n) ## Integer division so torch.zeros receives int sizes.
ffn1_b.normal_(mean=0.0, std=init_std)
self.ffn1_b.append(torch.nn.Parameter(ffn1_b))
ffn2_b = torch.zeros(hidden//hypercomplex_n, d_model//hypercomplex_n)
ffn2_b.normal_(mean=0.0, std=init_std)
self.ffn2_b.append(torch.nn.Parameter(ffn2_b))
self.ffn1 = torch.sum(torch.stack([torch.kron(self.ffn1_a[i], self.ffn1_b[i]) for i in range(hypercomplex_n)]), 0)
self.ffn2 = torch.sum(torch.stack([torch.kron(self.ffn2_a[i], self.ffn2_b[i]) for i in range(hypercomplex_n)]), 0)
else:
ffn1 = torch.zeros(d_model, hidden)
ffn1.normal_(mean=0.0, std=init_std)
self.ffn1 = torch.nn.Parameter(ffn1)
ffn2 = torch.zeros(hidden, d_model)
ffn2.normal_(mean=0.0, std=init_std)
self.ffn2 = torch.nn.Parameter(ffn2)
self.activation = torch.nn.GELU()
if layernorm_adaptor_input:
self.layer_norm = nn.LayerNorm(d_model)
else: # Identity
self.layer_norm = None
self.adaptor_scaling_factor = adaptor_scaling_factor
self.residual_connection = residual_connection
def forward(self, input):
if self.layer_norm is not None:
output = self.layer_norm(input)
else:
output = input
output = self.adaptor_scaling_factor * torch.matmul(self.activation(torch.matmul(output, self.ffn1)), self.ffn2) # Don't forget to check if you need the residual connection or not as well as the input layernorm or not.
if self.residual_connection:
return output + input
else:
return output
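# Usage sketch (with hypothetical sizes): Adaptor(d_model=1024, hidden=256)(x) applies an optional
# input LayerNorm, a 1024 -> 256 -> GELU -> 1024 bottleneck FFN scaled by adaptor_scaling_factor,
# and optionally adds the residual x back; with residual_connection=False the caller is expected
# to handle any residual itself.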
class EncoderDecoderAdaptors(nn.Module):
"""Custom Pytorch model for creating encoder-decoder adaptors. These adaptors will only be applied to the top encoder and decoder layer.
"""
def __init__(self, d_model, hidden, init_std=0.02, hypercomplex=False, hypercomplex_n=2, layernorm_adaptor_input=False, adaptor_scaling_factor=1.0, residual_connection=False):
super().__init__()
# initialize weights with random numbers
self.encoder_adaptor = Adaptor(d_model, hidden, init_std=init_std, hypercomplex=hypercomplex, hypercomplex_n=hypercomplex_n, layernorm_adaptor_input=layernorm_adaptor_input, adaptor_scaling_factor=adaptor_scaling_factor, residual_connection=residual_connection)
self.decoder_adaptor = Adaptor(d_model, hidden, init_std=init_std, hypercomplex=hypercomplex, hypercomplex_n=hypercomplex_n, layernorm_adaptor_input=layernorm_adaptor_input, adaptor_scaling_factor=adaptor_scaling_factor, residual_connection=residual_connection)
if hypercomplex:
print("Hypercomplex adaptors will be used.")
print("Number of additional parameters during training are:", (d_model*hidden*2*2)/hypercomplex_n + hypercomplex_n**3)
else:
print("Number of additional parameters during training are:", (d_model*hidden*2*2))
def forward(self, input, is_encoder):
if is_encoder:
return self.encoder_adaptor(input)
else:
return self.decoder_adaptor(input)
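# Usage note: with shallow adaptor tuning these two adaptors are each invoked once per pass, on the
# final encoder states (is_encoder=True) and the final decoder states (is_encoder=False), as done in
# MBartEncoder.forward and MBartDecoder.forward above.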
class DeepEncoderDecoderAdaptors(nn.Module):
"""Custom Pytorch model for creating encoder-decoder adaptors. These adaptors will be applied after each layer.
The adaptors should be lightweight with small hidden params.
"""
def __init__(self, d_model, hidden, encoder_layers, decoder_layers, encoder_adaptor_tying_config=None, decoder_adaptor_tying_config=None, init_std=0.02, hypercomplex=False, hypercomplex_n=2, ffn_only=False, layernorm_adaptor_input=False, adaptor_scaling_factor=1.0, residual_connection=False):
super().__init__()
# initialize weights with random numbers
if encoder_adaptor_tying_config is not None: ## Create unique or shared layers as per sharing configuration.
print("Tied Encoder adaptors with config", encoder_adaptor_tying_config)
layer_idxs = encoder_adaptor_tying_config.strip().split("-")
unique_idxs = sorted(set(layer_idxs))
self.unique_encoder_adaptors = torch.nn.ModuleList([Adaptor(d_model, hidden, init_std=init_std, hypercomplex=hypercomplex, hypercomplex_n=hypercomplex_n, layernorm_adaptor_input=layernorm_adaptor_input, adaptor_scaling_factor=adaptor_scaling_factor, residual_connection=residual_connection) for _ in range(len(unique_idxs)*(1 if ffn_only else 2))])
self.encoder_adaptors = []
for idx in layer_idxs:
if ffn_only:
self.encoder_adaptors.append(self.unique_encoder_adaptors[int(idx)-1])
else:
self.encoder_adaptors.extend([self.unique_encoder_adaptors[(int(idx)-1)*2], self.unique_encoder_adaptors[(int(idx)-1)*2+1]])
unique_encoder_adaptors_count = len(self.unique_encoder_adaptors)
else:
self.encoder_adaptors = torch.nn.ModuleList([Adaptor(d_model, hidden, init_std=init_std, hypercomplex=hypercomplex, hypercomplex_n=hypercomplex_n, layernorm_adaptor_input=layernorm_adaptor_input, adaptor_scaling_factor=adaptor_scaling_factor, residual_connection=residual_connection) for _ in range(encoder_layers*(1 if ffn_only else 2))])
unique_encoder_adaptors_count = len(self.encoder_adaptors)
if decoder_adaptor_tying_config is not None: ## Create unique or shared layers as per sharing configuration.
print("Tied Decoder adaptors with config", decoder_adaptor_tying_config)
layer_idxs = decoder_adaptor_tying_config.strip().split("-")
unique_idxs = sorted(set(layer_idxs))
self.unique_decoder_adaptors = nn.ModuleList([Adaptor(d_model, hidden, init_std=init_std, hypercomplex=hypercomplex, hypercomplex_n=hypercomplex_n, layernorm_adaptor_input=layernorm_adaptor_input, adaptor_scaling_factor=adaptor_scaling_factor, residual_connection=residual_connection) for _ in range(len(unique_idxs)*(1 if ffn_only else 3))])
self.decoder_adaptors = []
for idx in layer_idxs:
if ffn_only:
self.decoder_adaptors.append(self.unique_decoder_adaptors[int(idx)-1])
else:
self.decoder_adaptors.extend([self.unique_decoder_adaptors[(int(idx)-1)*3], self.unique_decoder_adaptors[(int(idx)-1)*3+1], self.unique_decoder_adaptors[(int(idx)-1)*3+2]])
unique_decoder_adaptors_count = len(self.unique_decoder_adaptors)
else:
self.decoder_adaptors = torch.nn.ModuleList([Adaptor(d_model, hidden, init_std=init_std, hypercomplex=hypercomplex, hypercomplex_n=hypercomplex_n, layernorm_adaptor_input=layernorm_adaptor_input, adaptor_scaling_factor=adaptor_scaling_factor, residual_connection=residual_connection) for _ in range(decoder_layers*(1 if ffn_only else 3))])
unique_decoder_adaptors_count = len(self.decoder_adaptors)
if hypercomplex:
print("Hypercomplex adaptors will be used.")
print("Number of additional parameters during training are:", ((d_model*hidden*2)/hypercomplex_n + hypercomplex_n**3)*(unique_encoder_adaptors_count+unique_decoder_adaptors_count))
else:
if ffn_only:
print("Adaptors will be used after FFN only.")
print("Number of additional parameters during training are:", (d_model*hidden*2)*(unique_encoder_adaptors_count+unique_decoder_adaptors_count))
def forward(self, input, is_encoder, layer_idx):
if is_encoder:
return self.encoder_adaptors[layer_idx](input)
else:
return self.decoder_adaptors[layer_idx](input)
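# Sizing note: with ffn_only=True one adaptor is created per layer, otherwise two per encoder layer
# and three per decoder layer (see the *2 / *3 factors above); which sub-layer each index corresponds
# to is defined by the encoder/decoder layer implementations, which call this module with
# (input, is_encoder, layer_idx).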
@add_start_docstrings(
"The MBART Model with a language modeling head. Can be used for summarization.", MBART_START_DOCSTRING
)
class MBartForConditionalGeneration(MBartPreTrainedModel):
base_model_prefix = "model"
_keys_to_ignore_on_load_missing = [
r"final_logits_bias",
r"encoder\.version",
r"decoder\.version",
r"lm_head\.weight",
r"prompt_params",
r"adaptor_layers",
]
def __init__(self, config: MBartConfig):
super().__init__(config)
self.model = MBartModel(config)
self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)
if config.multilayer_softmaxing is not None:
config.multilayer_softmaxing = [int(layer_id) for layer_id in config.multilayer_softmaxing.split(",")]
if config.temperature_calibration:
assert config.softmax_temperature == 1.0
print("Temperature calibration will be done.")
self.register_parameter("softmax_temperature", torch.ones(1))
print("Initial temperature is: ", self.softmax_temperature)
if config.num_domains_for_domain_classifier > 1:
print("Domain classifier will be used.")
self.domain_classifer_head = nn.Linear(config.d_model, config.num_domains_for_domain_classifier, bias=False)
if config.gradient_reversal_for_domain_classifier:
self.gradient_reversal_layer = GradientReversal()
if config.prompt_tuning:
print("Prompt tuning will be done.")
self.prompt_params = EncoderDecoderPrompts(config.num_prompts, config.encoder_layers, config.decoder_layers, config.d_model, config.init_std)
if config.adaptor_tuning:
print("Shallow adaptor tuning will be done.")
self.adaptor_layers = EncoderDecoderAdaptors(config.d_model, config.adaptor_hidden_size, config.init_std, config.hypercomplex, config.hypercomplex_n, config.layernorm_adaptor_input, config.adaptor_scaling_factor, config.residual_connection_adaptor)
elif config.deep_adaptor_tuning or config.deep_adaptor_tuning_ffn_only:
print("Deep adaptor tuning will be done.")
if config.parallel_adaptors:
print("Parallel adaptors will be used.")
self.adaptor_layers = DeepEncoderDecoderAdaptors(config.d_model, config.adaptor_hidden_size, config.encoder_layers, config.decoder_layers, config.encoder_adaptor_tying_config, config.decoder_adaptor_tying_config, config.init_std, config.hypercomplex, config.hypercomplex_n, config.deep_adaptor_tuning_ffn_only, config.layernorm_adaptor_input, config.adaptor_scaling_factor, config.residual_connection_adaptor)
if config.softmax_bias_tuning:
print("Softmax bias tuning will be done. Replacing the final logits bias with a learnable parameter.")
self.final_logits_bias = nn.Parameter(torch.zeros(self.model.shared.num_embeddings).normal_(mean=0.0, std=config.init_std))
self.init_weights()
def get_encoder(self):
return self.model.get_encoder()
def get_decoder(self):
return self.model.get_decoder()
def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:
new_embeddings = super().resize_token_embeddings(new_num_tokens)
self._resize_final_logits_bias(new_num_tokens)
return new_embeddings
def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
old_num_tokens = self.final_logits_bias.shape[-1]
if new_num_tokens <= old_num_tokens:
new_bias = self.final_logits_bias[:, :new_num_tokens]
else:
extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
self.register_buffer("final_logits_bias", new_bias)
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def initialize_prompt_params_with_random_embeddings(self):
""" Using random prompts as initial params is bad. Apparently its better to use random pretrained embeddings from the model.
"""
print("Initializing prompt params with random embedding weights.")
embeds = self.model.shared.weight.detach().clone().requires_grad_(True)
num_embeds = embeds.size()[0]
num_prompts = self.config.num_prompts
with torch.no_grad():
for i in range(len(self.prompt_params.encoder_prompts_key)):
for prompt_id in range(num_prompts):
self.prompt_params.encoder_prompts_key[i].prompt_params[0, prompt_id, :] = embeds[random.randint(0, num_embeds-1)] ## initialize with existing embeddings
self.prompt_params.encoder_prompts_value[i].prompt_params[0, prompt_id, :] = embeds[random.randint(0, num_embeds-1)] ## initialize with existing embeddings
for i in range(len(self.prompt_params.decoder_prompts_key_sa)):
for prompt_id in range(num_prompts):
self.prompt_params.decoder_prompts_key_sa[i].prompt_params[0, prompt_id, :] = embeds[random.randint(0, num_embeds-1)] ## initialize with existing embeddings
self.prompt_params.decoder_prompts_value_sa[i].prompt_params[0, prompt_id, :] = embeds[random.randint(0, num_embeds-1)] ## initialize with existing embeddings
self.prompt_params.decoder_prompts_key_xa[i].prompt_params[0, prompt_id, :] = embeds[random.randint(0, num_embeds-1)] ## initialize with existing embeddings
self.prompt_params.decoder_prompts_value_xa[i].prompt_params[0, prompt_id, :] = embeds[random.randint(0, num_embeds-1)] ## initialize with existing embeddings
@add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
@add_end_docstrings(MBART_GENERATION_EXAMPLE)
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
encoder_outputs=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
additional_input_ids=None,
additional_input_ids_mask=None,
additional_encoder_outputs=None,
additional_past_key_values=None,
curr_decode_length=-1,
context_encoder_representations=None,
label_mask=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should either be in ``[0, ...,
config.vocab_size]`` or -100 (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.
Returns:
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
if decoder_input_ids is None:
decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id)
if self.config.multi_source_method == "average_softmaxes":
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
encoder_outputs=encoder_outputs,
decoder_attention_mask=decoder_attention_mask,
head_mask=head_mask,
decoder_head_mask=decoder_head_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
additional_input_ids=None,
additional_input_ids_mask=None,
additional_encoder_outputs=None,
curr_decode_length=curr_decode_length,
                prompt_params=self.prompt_params(0) if self.config.prompt_tuning and (self.training or curr_decode_length == 1) else None, ## Not needed during decoding once curr_decode_length > 1; set to None to avoid headaches.
adaptor_layers=self.adaptor_layers if self.config.adaptor_tuning or self.config.deep_adaptor_tuning or self.config.deep_adaptor_tuning_ffn_only else None,
deep_adaptor_tuning=self.config.deep_adaptor_tuning, ## TODO: make this a part of the object's attributes and access from there
deep_adaptor_tuning_ffn_only = self.config.deep_adaptor_tuning_ffn_only,
parallel_adaptors=self.config.parallel_adaptors,
)
lm_logits = (self.lm_head(outputs[0]) + self.final_logits_bias)/self.config.softmax_temperature ## Divide the logits by a temperature to get a smoothed softmax.
if self.config.temperature_calibration:
lm_logits = lm_logits/self.softmax_temperature ## The softmax_temperature config param should be 1.0
additional_outputs = self.model(
additional_input_ids,
attention_mask=additional_input_ids_mask,
decoder_input_ids=decoder_input_ids,
encoder_outputs=additional_encoder_outputs,
decoder_attention_mask=decoder_attention_mask,
head_mask=head_mask,
decoder_head_mask=decoder_head_mask,
past_key_values=additional_past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
additional_input_ids=None,
additional_input_ids_mask=None,
additional_encoder_outputs=None,
curr_decode_length=curr_decode_length,
prompt_params=self.prompt_params(0) if self.config.prompt_tuning and (self.training or curr_decode_length == 1) else None,
adaptor_layers=self.adaptor_layers if self.config.adaptor_tuning or self.config.deep_adaptor_tuning or self.config.deep_adaptor_tuning_ffn_only else None,
deep_adaptor_tuning=self.config.deep_adaptor_tuning,
deep_adaptor_tuning_ffn_only = self.config.deep_adaptor_tuning_ffn_only,
parallel_adaptors=self.config.parallel_adaptors,
)
additional_source_lm_logits = (self.lm_head(additional_outputs[0]) + self.final_logits_bias)/self.config.softmax_temperature ## Divide the logits by a temperature to get a smoothed softmax.
if self.config.temperature_calibration:
additional_source_lm_logits = additional_source_lm_logits/self.softmax_temperature ## The softmax_temperature config param should be 1.0
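            # Sketch of the "average_softmaxes" branch above: the same decoder inputs are run once against
            # the main source and once against the additional source, giving two sets of temperature-scaled
            # logits (lm_logits and additional_source_lm_logits); both are returned separately so their
            # softmaxes can presumably be combined (e.g. averaged) by the calling code.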
else:
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
encoder_outputs=encoder_outputs,
decoder_attention_mask=decoder_attention_mask,
head_mask=head_mask,
decoder_head_mask=decoder_head_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
additional_input_ids=additional_input_ids,
additional_input_ids_mask=additional_input_ids_mask,
additional_encoder_outputs=additional_encoder_outputs,
curr_decode_length=curr_decode_length,
context_encoder_representations=context_encoder_representations,
prompt_params=self.prompt_params(0) if self.config.prompt_tuning and (self.training or curr_decode_length == 1) else None,
adaptor_layers=self.adaptor_layers if self.config.adaptor_tuning or self.config.deep_adaptor_tuning or self.config.deep_adaptor_tuning_ffn_only else None,
deep_adaptor_tuning=self.config.deep_adaptor_tuning,
deep_adaptor_tuning_ffn_only = self.config.deep_adaptor_tuning_ffn_only,
parallel_adaptors=self.config.parallel_adaptors,
)
lm_logits = (self.lm_head(outputs[0]) + self.final_logits_bias)/self.config.softmax_temperature ## Divide the logits by a temperature to get a smoothed softmax.
if self.config.temperature_calibration:
lm_logits = lm_logits/self.softmax_temperature
additional_lm_logits = []
if self.config.multilayer_softmaxing is not None:
            for layer_id in self.config.multilayer_softmaxing: ## We count the embedding layer too. Who knows what may happen? However, we won't do anything for the final layer as it's already dealt with.
lm_representation = outputs.decoder_hidden_states[layer_id]
additional_lm_logits.append((self.lm_head(lm_representation) + self.final_logits_bias)/self.config.softmax_temperature) ## The additional logits will be collected here and then returned to my main code. Divide the logits by a temperature to get a smoothed softmax.
if self.config.temperature_calibration:
additional_lm_logits[-1] = additional_lm_logits[-1]/self.softmax_temperature ## The softmax_temperature config param should be 1.0
if self.config.num_domains_for_domain_classifier > 1: ## Pool the output layer representations by taking a mean and then generate logits for them.
dom_pooled_outputs = outputs[0].masked_fill(label_mask, 0.0).mean(dim=1)
            if self.config.gradient_reversal_for_domain_classifier: ## If we want to do gradient reversal then that's going to be done here.
dom_pooled_outputs = self.gradient_reversal_layer(dom_pooled_outputs)
domain_classifier_logits = self.domain_classifer_head(dom_pooled_outputs)
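            # Sketch: positions flagged by label_mask are zeroed out, the decoder states are mean-pooled over
            # the sequence dimension, optionally passed through a gradient reversal layer (as in
            # domain-adversarial training), and finally classified into one of the configured domains.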
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (lm_logits,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return Seq2SeqLMOutput(
loss=masked_lm_loss,
logits=lm_logits,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
additional_lm_logits=additional_lm_logits,
additional_encoder_last_hidden_state=outputs.additional_encoder_last_hidden_state if self.config.multi_source else None,
additional_encoder_hidden_states=outputs.additional_encoder_hidden_states if self.config.multi_source else None,
additional_encoder_attentions=outputs.additional_encoder_attentions if self.config.multi_source else None,
additional_cross_attentions=outputs.additional_cross_attentions if self.config.multi_source and (self.config.multi_source_method == "merge_after_attention" or self.config.multi_source_method == "self_relevance_and_merge_after_attention" or self.config.multi_source_method == "merge_after_attention_with_context_relevance_only" or self.config.multi_source_method == "self_relevance_and_merge_after_attention_with_context_relevance_only" or self.config.multi_source_method == "average_softmaxes" or self.config.multi_source_method == "mid_fusion_merge_after_attention" or self.config.multi_source_method == "bottleneck_mid_fusion_merge_after_attention") else (),
additional_past_key_values=additional_outputs.past_key_values if self.config.multi_source and (self.config.multi_source_method == "average_softmaxes") else None,
additional_source_lm_logits=additional_source_lm_logits if self.config.multi_source and (self.config.multi_source_method == "average_softmaxes") else None,
context_encoder_representations = outputs.context_encoder_representations if self.config.multi_source and (self.config.multi_source_method == "additional_source_attention" or self.config.multi_source_method == "mid_fusion_merge_before_attention" or self.config.multi_source_method == "bottleneck_mid_fusion_merge_before_attention" or self.config.multi_source_method == "bottleneck_mid_fusion_merge_after_attention" or self.config.multi_source_method == "mid_fusion_merge_after_attention") else None,
softmax_temperature = self.softmax_temperature if self.config.temperature_calibration else None,
domain_classifier_logits = domain_classifier_logits if self.config.num_domains_for_domain_classifier > 1 else None,
encoder_moe_losses = outputs.encoder_moe_losses,
decoder_moe_losses = outputs.decoder_moe_losses,
)
def prepare_inputs_for_generation(
self, decoder_input_ids, past=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs
):
# cut decoder_input_ids if past is used
if past is not None:
decoder_input_ids = decoder_input_ids[:, -1:]
return {
"input_ids": None, # encoder_outputs is defined. input_ids not needed
"encoder_outputs": encoder_outputs,
"past_key_values": past,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"use_cache": use_cache, # change this to avoid caching (presumably for debugging)
"additional_input_ids": None, # additional_encoder_outputs is defined. additional_input_ids not needed
"additional_input_ids_mask": kwargs["additional_input_ids_mask"] if self.config.multi_source else None, ## This will contain the additional encoder outputs.
"additional_encoder_outputs": kwargs["additional_encoder_outputs"] if self.config.multi_source else None, ## This will contain the additional encoder outputs.
"additional_past_key_values": kwargs["additional_past"] if self.config.multi_source_method == "average_softmaxes" and "additional_past" in kwargs else None, ## This is for the past of the additional source when averaging softmaxes.
"context_encoder_representations": kwargs["context_encoder_representations"] if self.config.multi_source else None, ## A bit sloppy and should be controlled by an additional condition looking at the value of multi_source type.
# "prompt_params": kwargs["prompt_params"] if self.config.prompt_tuning else None, ## Dare not forget this. 26th April 2022 is the day I had a brain fart.
# "adaptor_layers": kwargs["adaptor_layers"] if self.config.adaptor_tuning or self.config.deep_adaptor_tuning or self.config.deep_adaptor_tuning_ffn_only else None, ## Dare not forget this. 26th April 2022 is the day I had a brain fart.
# "deep_adaptor_tuning": kwargs["deep_adaptor_tuning"] if self.config.adaptor_tuning or self.config.deep_adaptor_tuning or self.config.deep_adaptor_tuning_ffn_only else False, ## Dare not forget this. 26th April 2022 is the day I had a brain fart.
# "deep_adaptor_tuning_ffn_only": kwargs["deep_adaptor_tuning_ffn_only"] if self.config.adaptor_tuning or self.config.deep_adaptor_tuning or self.config.deep_adaptor_tuning_ffn_only else False, ## Dare not forget this. 26th April 2022 is the day I had a brain fart.
# "parallel_adaptors": kwargs["parallel_adaptors"] if self.config.adaptor_tuning or self.config.deep_adaptor_tuning or self.config.deep_adaptor_tuning_ffn_only else False, ## Dare not forget this. 26th April 2022 is the day I had a brain fart.
}
## Modified by Raj Dabre. End.
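    # Sketch of prepare_inputs_for_generation above: once a cache exists, only the most recent token is fed
    # back in (decoder_input_ids[:, -1:]); input_ids is dropped because encoder_outputs is already available;
    # and the multi-source extras (additional masks, encoder outputs, past) are threaded through from kwargs.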
def adjust_logits_during_generation(self, logits, cur_len, max_length):
if cur_len == max_length - 1 and self.config.eos_token_id is not None:
self._force_token_id_to_be_generated(logits, self.config.eos_token_id)
return logits
@staticmethod
def _force_token_id_to_be_generated(scores, token_id) -> None:
"""force one of token_ids to be generated by setting prob of all other tokens to 0 (logprob=-float("inf"))"""
scores[:, [x for x in range(scores.shape[1]) if x != token_id]] = -float("inf")
@staticmethod
def _reorder_cache(past, beam_idx):
reordered_past = ()
for layer_past in past:
# cached cross_attention states don't have to be reordered -> they are always the same
reordered_past += (
tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],
)
return reordered_past
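    # Sketch: during beam search, the cached self-attention key/value states (the first two tensors of each
    # layer tuple) are re-indexed along the batch dimension with beam_idx, while the cached cross-attention
    # states are left as-is since, as noted above, they are identical across beams of the same example.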
@add_start_docstrings(
"""
    MBart model with a sequence classification head on top (a linear layer on top of the pooled output), e.g. for GLUE
tasks.
""",
MBART_START_DOCSTRING,
)
class MBartForSequenceClassification(MBartPreTrainedModel):
def __init__(self, config: MBartConfig, **kwargs):
super().__init__(config, **kwargs)
self.model = MBartModel(config)
self.classification_head = MBartClassificationHead(
config.d_model,
config.d_model,
config.num_labels,
config.classifier_dropout,
)
self.model._init_weights(self.classification_head.dense)
self.model._init_weights(self.classification_head.out_proj)
@add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="facebook/mbart-large-cc25",
output_type=Seq2SeqSequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
# Copied from transformers.models.bart.modeling_bart.BartForSequenceClassification.forward
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
encoder_outputs=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
if input_ids is None and inputs_embeds is not None:
raise NotImplementedError(
f"Passing input embeddings is currently not supported for {self.__class__.__name__}"
)
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
head_mask=head_mask,
decoder_head_mask=decoder_head_mask,
encoder_outputs=encoder_outputs,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0] # last hidden state
eos_mask = input_ids.eq(self.config.eos_token_id)
if len(torch.unique(eos_mask.sum(1))) > 1:
raise ValueError("All examples must have the same number of <eos> tokens.")
sentence_representation = hidden_states[eos_mask, :].view(hidden_states.size(0), -1, hidden_states.size(-1))[
:, -1, :
]
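        # Sketch: eos_mask marks every <eos> position; the hidden states at those positions are gathered and
        # reshaped to (batch, num_eos, hidden), and the representation of the last <eos> token of each
        # sequence is used as the pooled sentence representation for the classification head.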
logits = self.classification_head(sentence_representation)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return Seq2SeqSequenceClassifierOutput(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
@add_start_docstrings(
"""
MBART Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
MBART_START_DOCSTRING,
)
class MBartForQuestionAnswering(MBartPreTrainedModel):
def __init__(self, config):
super().__init__(config)
config.num_labels = 2
self.num_labels = config.num_labels
self.model = MBartModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.model._init_weights(self.qa_outputs)
@add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="facebook/mbart-large-cc25",
output_type=Seq2SeqQuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
# Copied from transformers.models.bart.modeling_bart.BartForQuestionAnswering.forward
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
encoder_outputs=None,
start_positions=None,
end_positions=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if start_positions is not None and end_positions is not None:
use_cache = False
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
head_mask=head_mask,
decoder_head_mask=decoder_head_mask,
encoder_outputs=encoder_outputs,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
total_loss = None
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, the position tensors may carry an extra dimension; squeeze it away
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
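            # Sketch: start/end positions outside the sequence are clamped to ignored_index (the sequence
            # length), which CrossEntropyLoss then skips via ignore_index; the span loss is the mean of the
            # start-position and end-position losses.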
if not return_dict:
output = (
start_logits,
end_logits,
) + outputs[1:]
return ((total_loss,) + output) if total_loss is not None else output
return Seq2SeqQuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
# Copied from transformers.models.bart.modeling_bart.BartDecoderWrapper with Bart->MBart
class MBartDecoderWrapper(MBartPreTrainedModel):
"""
This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
used in combination with the :class:`~transformers.EncoderDecoderModel` framework.
"""
def __init__(self, config):
super().__init__(config)
self.decoder = MBartDecoder(config)
def forward(self, *args, **kwargs):
return self.decoder(*args, **kwargs)
# Copied from transformers.models.bart.modeling_bart.BartForCausalLM with Bart->MBart
class MBartForCausalLM(MBartPreTrainedModel):
def __init__(self, config):
super().__init__(config)
config = copy.deepcopy(config)
config.is_decoder = True
config.is_encoder_decoder = False
self.model = MBartDecoderWrapper(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.init_weights()
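    # Sketch: the deep-copied config is forced into decoder-only mode (is_decoder=True,
    # is_encoder_decoder=False) so the wrapped MBartDecoder can act as a standalone causal LM, with lm_head
    # projecting decoder hidden states back onto the vocabulary.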
def get_input_embeddings(self):
return self.model.decoder.embed_tokens
def set_input_embeddings(self, value):
self.model.decoder.embed_tokens = value
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def set_decoder(self, decoder):
self.model.decoder = decoder
def get_decoder(self):
return self.model.decoder
@replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
head_mask=None,
encoder_head_mask=None,
past_key_values=None,
inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using :class:`~transformers.MBartTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
if the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
encoder_head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention
on hidden heads. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last ``decoder_input_ids``
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all ``decoder_input_ids`` of shape :obj:`(batch_size, sequence_length)`.
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should either be in ``[0, ...,
config.vocab_size]`` or -100 (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are
ignored (masked), the loss is only computed for the tokens with labels in ``[0, ...,
config.vocab_size]``.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
Returns:
Example::
>>> from transformers import MBartTokenizer, MBartForCausalLM
            >>> tokenizer = MBartTokenizer.from_pretrained('facebook/mbart-large-cc25')
            >>> model = MBartForCausalLM.from_pretrained('facebook/mbart-large-cc25', add_cross_attention=False)
>>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
            >>> logits = outputs.logits
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model.decoder(
input_ids=input_ids,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
head_mask=head_mask,
encoder_head_mask=encoder_head_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
logits = self.lm_head(outputs[0])
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, use_cache=None, **kwargs):
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_ids.shape)
if past:
input_ids = input_ids[:, -1:]
# first step, decoder_cached_states are empty
return {
"input_ids": input_ids, # encoder_outputs is defined. input_ids not needed
"attention_mask": attention_mask,
"past_key_values": past,
"use_cache": use_cache,
}
@staticmethod
def _reorder_cache(past, beam_idx):
reordered_past = ()
for layer_past in past:
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
return reordered_past | [
"torch.nn.Linear",
"torch.cat",
"torch.einsum",
"torch.nn.ModuleList",
"torch.isnan",
"torch.finfo",
"torch.bmm",
"torch.nn.Parameter",
"torch.ones",
"torch.nn.CrossEntropyLoss",
"torch.nn.LayerNorm",
"torch.tril",
"torch.tensor",
"torch.zeros",
"torch.nn.functional.dropout",
"torch.clamp",
"torch.full",
"torch.isinf",
"torch.nn.functional.softmax",
"torch.nn.GELU",
"torch.matmul",
"torch.pow",
"torch.nn.Dropout",
"torch.arange",
"torch.no_grad",
"torch.split",
"torch.kron",
"torch.tanh",
"torch.nn.Embedding"
] | 1.7.1 | koukoulala/yanmtt | 24a499aff8f587e6de252dd67a64471fce6fe71b |
1.1 | """Python Script Template."""
from abc import ABCMeta
import torch
from hucrl.policy.augmented_policy import AugmentedPolicy
from rllib.policy import AbstractPolicy, NNPolicy
class AdversarialPolicy(AbstractPolicy, metaclass=ABCMeta):
"""Given a protagonist and an antagonist policy, combine to give a joint policy."""
def __init__(
self,
protagonist_policy,
antagonist_policy,
hallucination_policy=None,
protagonist=True,
*args,
**kwargs,
) -> None:
super().__init__(
deterministic=protagonist_policy.deterministic,
dist_params=protagonist_policy.dist_params,
*args,
**kwargs,
)
self._protagonist_policy = protagonist_policy
self._antagonist_policy = antagonist_policy
assert protagonist_policy.dim_state == antagonist_policy.dim_state
assert protagonist_policy.dim_state == self.dim_state
self._protagonist = protagonist
if hallucination_policy is None:
hallucination_policy = NNPolicy(
dim_state=self.dim_state, dim_action=self.dim_action
)
self._hallucination_policy = hallucination_policy
@property
def protagonist_policy(self):
"""Return protagonist policy."""
if isinstance(self._protagonist_policy, AugmentedPolicy):
return self._protagonist_policy.true_policy
else:
return self._protagonist_policy
def set_protagonist_policy(self, new_policy):
"""Set protagonist policy."""
if isinstance(self._protagonist_policy, AugmentedPolicy):
if isinstance(new_policy, AugmentedPolicy):
self._protagonist_policy.true_policy = new_policy.true_policy
self._protagonist_policy.hallucination_policy = (
new_policy.hallucination_policy
)
else:
self._protagonist_policy.true_policy = new_policy
else:
self._protagonist_policy = new_policy
@property
def antagonist_policy(self):
"""Return antagonist policy."""
if isinstance(self._antagonist_policy, AugmentedPolicy):
return self._antagonist_policy.true_policy
else:
return self._antagonist_policy
def set_antagonist_policy(self, new_policy):
"""Set antagonist policy."""
if isinstance(self._antagonist_policy, AugmentedPolicy):
if isinstance(new_policy, AugmentedPolicy):
self._antagonist_policy.true_policy = new_policy.true_policy
self._antagonist_policy.hallucination_policy = (
new_policy.hallucination_policy
)
else:
self._antagonist_policy.true_policy = new_policy
else:
self._antagonist_policy = new_policy
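    # Sketch: the two setters above preserve AugmentedPolicy wrappers -- when the stored policy is an
    # AugmentedPolicy, only its true_policy (and, if the incoming policy is also augmented, its
    # hallucination_policy) is swapped; otherwise the stored reference itself is replaced.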
@property
def hallucination_policy(self):
"""Return hallucination policy."""
if isinstance(self._protagonist_policy, AugmentedPolicy) and self.protagonist:
return self._protagonist_policy.hallucination_policy
elif isinstance(self._antagonist_policy, AugmentedPolicy) and self.antagonist:
return self._antagonist_policy.hallucination_policy
else:
return self._hallucination_policy
def set_hallucination_policy(self, new_policy):
"""Set hallucination policy."""
if isinstance(self._protagonist_policy, AugmentedPolicy):
if self.protagonist:
self._protagonist_policy.hallucination_policy = new_policy
            else:
                self._antagonist_policy.hallucination_policy = new_policy  # mirrors the protagonist branch: swap the antagonist's hallucination policy
else:
self._hallucination_policy = new_policy
@property
def protagonist(self):
"""Return true if it is in protagonist mode."""
return self._protagonist
@protagonist.setter
def protagonist(self, new_value):
"""Set protagonist value."""
self._protagonist = new_value
@property
def antagonist(self):
"""Return true if it is in antagonist mode."""
return not self._protagonist
@antagonist.setter
def antagonist(self, new_value):
"""Set protagonist value."""
self._protagonist = not new_value
@property
def deterministic(self):
"""Get flag if the policy is deterministic or not."""
return self._deterministic
@deterministic.setter
def deterministic(self, value):
"""Set flag if the policy is deterministic or not."""
self._deterministic = value
self.protagonist_policy.deterministic = value
self.antagonist_policy.deterministic = value
self.hallucination_policy.deterministic = value
@torch.jit.export
def reset(self):
"""Reset policy parameters (for example internal states)."""
super().reset()
self.protagonist_policy.reset()
self.antagonist_policy.reset()
self.hallucination_policy.reset()
@torch.jit.export
def update(self):
"""Update policy parameters."""
super().update()
self.protagonist_policy.update()
self.antagonist_policy.update()
self.hallucination_policy.update()
@torch.jit.export
def set_goal(self, goal=None):
"""Set policy goal."""
super().set_goal(goal)
self.protagonist_policy.set_goal(goal)
self.antagonist_policy.set_goal(goal)
self.hallucination_policy.set_goal(goal)
def stack_policies(self, means, stds):
"""Stack a set of policies."""
mean = torch.cat(means, dim=-1)[..., : self.dim_action[0]]
std = torch.cat(stds, dim=-1)[..., : self.dim_action[0]]
return mean, std.diag_embed()
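    # Sketch: stack_policies() concatenates the per-policy means and stds along the last dimension, truncates
    # both to the joint action dimensionality (self.dim_action[0]), and returns the mean together with a
    # diagonal matrix of the stds built via diag_embed().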
def forward(self, state):
"""Forward compute the policy."""
raise NotImplementedError
| [
"torch.cat"
] | 1.1.1 | sebascuri/rhucrl | 27663e1302f3bbc636dff28495c6f2667bb7c1da |
1.3 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from typing import Any, Optional
from pytorch_lightning.metrics.metric import Metric
from pytorch_lightning.utilities import rank_zero_warn
from pytorch_lightning.metrics.functional.explained_variance import (
_explained_variance_update,
_explained_variance_compute,
)
class ExplainedVariance(Metric):
"""
Computes explained variance.
Forward accepts
- ``preds`` (float tensor): ``(N,)`` or ``(N, ...)`` (multioutput)
- ``target`` (long tensor): ``(N,)`` or ``(N, ...)`` (multioutput)
    In the case of multioutput, the variances will by default be uniformly
    averaged over the additional dimensions. Please see the argument `multioutput`
    for changing this behavior.
Args:
multioutput:
Defines aggregation in the case of multiple output scores. Can be one
            of the following strings (default is `'uniform_average'`):
* `'raw_values'` returns full set of scores
* `'uniform_average'` scores are uniformly averaged
* `'variance_weighted'` scores are weighted by their individual variances
compute_on_step:
Forward only calls ``update()`` and return None if this is set to False. default: True
dist_sync_on_step:
Synchronize metric state across processes at each ``forward()``
before returning the value at the step. default: False
process_group:
Specify the process group on which synchronization is called. default: None (which selects the entire world)
Example:
>>> from pytorch_lightning.metrics import ExplainedVariance
>>> target = torch.tensor([3, -0.5, 2, 7])
>>> preds = torch.tensor([2.5, 0.0, 2, 8])
>>> explained_variance = ExplainedVariance()
>>> explained_variance(preds, target)
tensor(0.9572)
>>> target = torch.tensor([[0.5, 1], [-1, 1], [7, -6]])
>>> preds = torch.tensor([[0, 2], [-1, 2], [8, -5]])
>>> explained_variance = ExplainedVariance(multioutput='raw_values')
>>> explained_variance(preds, target)
tensor([0.9677, 1.0000])
"""
def __init__(
self,
multioutput: str = 'uniform_average',
compute_on_step: bool = True,
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
):
super().__init__(
compute_on_step=compute_on_step,
dist_sync_on_step=dist_sync_on_step,
process_group=process_group,
)
allowed_multioutput = ('raw_values', 'uniform_average', 'variance_weighted')
if multioutput not in allowed_multioutput:
raise ValueError(
f'Invalid input to argument `multioutput`. Choose one of the following: {allowed_multioutput}'
)
self.multioutput = multioutput
self.add_state("y", default=[], dist_reduce_fx=None)
self.add_state("y_pred", default=[], dist_reduce_fx=None)
rank_zero_warn(
'Metric `ExplainedVariance` will save all targets and'
' predictions in buffer. For large datasets this may lead'
' to large memory footprint.'
)
def update(self, preds: torch.Tensor, target: torch.Tensor):
"""
Update state with predictions and targets.
Args:
preds: Predictions from model
target: Ground truth values
"""
preds, target = _explained_variance_update(preds, target)
self.y_pred.append(preds)
self.y.append(target)
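    # Sketch: update() only appends the validated predictions and targets to the "y_pred"/"y" list states;
    # compute() below concatenates everything buffered so far and evaluates explained variance in one shot,
    # which is why the constructor warns about the memory footprint on large datasets.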
def compute(self):
"""
Computes explained variance over state.
"""
preds = torch.cat(self.y_pred, dim=0)
target = torch.cat(self.y, dim=0)
return _explained_variance_compute(preds, target, self.multioutput)
| [
"torch.cat"
] | 1.3 | wdmwhh/pytorch-lightning | 5d10a36762776c4b6f6a9c55b4e6bf7bd258137f |
1.3 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import platform
from distutils.version import LooseVersion
from unittest.mock import patch
import pytest
import torch
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import IterableDataset, Subset
from torch.utils.data.distributed import DistributedSampler
import tests.base.develop_pipelines as tpipes
from pytorch_lightning import Trainer, Callback
from pytorch_lightning.utilities.data import has_iterable_dataset, has_len
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.base import EvalModelTemplate
def test_fit_train_loader_only(tmpdir):
model = EvalModelTemplate()
train_dataloader = model.train_dataloader()
model.train_dataloader = None
model.val_dataloader = None
model.test_dataloader = None
model.validation_step = None
model.validation_epoch_end = None
model.test_step = None
model.test_epoch_end = None
trainer = Trainer(fast_dev_run=True, default_root_dir=tmpdir)
trainer.fit(model, train_dataloader=train_dataloader)
def test_fit_val_loader_only(tmpdir):
model = EvalModelTemplate()
train_dataloader = model.train_dataloader()
val_dataloader = model.val_dataloader()
model.train_dataloader = None
model.val_dataloader = None
model.test_dataloader = None
model.test_step = None
model.test_epoch_end = None
trainer = Trainer(fast_dev_run=True, default_root_dir=tmpdir)
trainer.fit(model, train_dataloader=train_dataloader, val_dataloaders=val_dataloader)
@pytest.mark.parametrize("dataloader_options", [
dict(val_check_interval=10000),
])
def test_dataloader_config_errors_runtime(tmpdir, dataloader_options):
model = EvalModelTemplate()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
**dataloader_options,
)
with pytest.raises(ValueError):
# fit model
trainer.fit(model)
@pytest.mark.parametrize("dataloader_options", [
dict(limit_train_batches=-0.1),
dict(limit_train_batches=1.2),
dict(limit_val_batches=-0.1),
dict(limit_val_batches=1.2),
dict(limit_test_batches=-0.1),
dict(limit_test_batches=1.2),
dict(val_check_interval=-0.1),
dict(val_check_interval=1.2),
dict(overfit_batches=-0.1),
dict(overfit_batches=1.2),
])
def test_dataloader_config_errors_init(tmpdir, dataloader_options):
with pytest.raises(MisconfigurationException, match='passed invalid value'):
Trainer(
default_root_dir=tmpdir,
max_epochs=1,
**dataloader_options,
)
def test_multiple_val_dataloader(tmpdir):
"""Verify multiple val_dataloader."""
model = EvalModelTemplate()
model.val_dataloader = model.val_dataloader__multiple
model.validation_step = model.validation_step__multiple_dataloaders
model.validation_epoch_end = model.validation_epoch_end__multiple_dataloaders
# fit model
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
limit_val_batches=0.1,
limit_train_batches=1.0,
)
result = trainer.fit(model)
# verify training completed
assert result == 1
# verify there are 2 val loaders
assert len(trainer.val_dataloaders) == 2, \
'Multiple val_dataloaders not initiated properly'
# make sure predictions are good for each val set
for dataloader in trainer.val_dataloaders:
tpipes.run_prediction(dataloader, trainer.model)
@pytest.mark.parametrize('ckpt_path', [None, 'best', 'specific'])
def test_multiple_test_dataloader(tmpdir, ckpt_path):
"""Verify multiple test_dataloader."""
model_template = EvalModelTemplate()
class MultipleTestDataloaderModel(EvalModelTemplate):
def test_dataloader(self):
return model_template.test_dataloader__multiple()
def test_step(self, batch, batch_idx, *args, **kwargs):
return model_template.test_step__multiple_dataloaders(batch, batch_idx, *args, **kwargs)
model = MultipleTestDataloaderModel()
# fit model
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
limit_val_batches=0.1,
limit_train_batches=0.2,
)
trainer.fit(model)
if ckpt_path == 'specific':
ckpt_path = trainer.checkpoint_callback.best_model_path
trainer.test(ckpt_path=ckpt_path)
# verify there are 2 test loaders
assert len(trainer.test_dataloaders) == 2, \
'Multiple test_dataloaders not initiated properly'
# make sure predictions are good for each test set
for dataloader in trainer.test_dataloaders:
tpipes.run_prediction(dataloader, trainer.model)
# run the test method
trainer.test(ckpt_path=ckpt_path)
def test_train_dataloader_passed_to_fit(tmpdir):
"""Verify that train dataloader can be passed to fit """
# only train passed to fit
model = EvalModelTemplate()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
limit_val_batches=0.1,
limit_train_batches=0.2,
)
fit_options = dict(train_dataloader=model.dataloader(train=True))
result = trainer.fit(model, **fit_options)
assert result == 1
def test_train_val_dataloaders_passed_to_fit(tmpdir):
""" Verify that train & val dataloader can be passed to fit """
# train, val passed to fit
model = EvalModelTemplate()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
limit_val_batches=0.1,
limit_train_batches=0.2,
)
fit_options = dict(train_dataloader=model.dataloader(train=True),
val_dataloaders=model.dataloader(train=False))
result = trainer.fit(model, **fit_options)
assert result == 1
assert len(trainer.val_dataloaders) == 1, \
f'`val_dataloaders` not initiated properly, got {trainer.val_dataloaders}'
@pytest.mark.parametrize('ckpt_path', [None, 'best', 'specific'])
def test_all_dataloaders_passed_to_fit(tmpdir, ckpt_path):
"""Verify train, val & test dataloader(s) can be passed to fit and test method"""
model = EvalModelTemplate()
# train, val and test passed to fit
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
limit_val_batches=0.1,
limit_train_batches=0.2,
)
fit_options = dict(train_dataloader=model.dataloader(train=True),
val_dataloaders=model.dataloader(train=False))
result = trainer.fit(model, **fit_options)
if ckpt_path == 'specific':
ckpt_path = trainer.checkpoint_callback.best_model_path
test_options = dict(test_dataloaders=model.dataloader(train=False),
ckpt_path=ckpt_path)
trainer.test(**test_options)
assert result == 1
assert len(trainer.val_dataloaders) == 1, \
f'val_dataloaders` not initiated properly, got {trainer.val_dataloaders}'
assert len(trainer.test_dataloaders) == 1, \
f'test_dataloaders` not initiated properly, got {trainer.test_dataloaders}'
@pytest.mark.parametrize('ckpt_path', [None, 'best', 'specific'])
def test_multiple_dataloaders_passed_to_fit(tmpdir, ckpt_path):
"""Verify that multiple val & test dataloaders can be passed to fit."""
model = EvalModelTemplate()
model.validation_step = model.validation_step__multiple_dataloaders
model.validation_epoch_end = model.validation_epoch_end__multiple_dataloaders
model.test_step = model.test_step__multiple_dataloaders
# train, multiple val and multiple test passed to fit
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
limit_val_batches=0.1,
limit_train_batches=0.2,
)
fit_options = dict(train_dataloader=model.dataloader(train=True),
val_dataloaders=[model.dataloader(train=False),
model.dataloader(train=False)])
trainer.fit(model, **fit_options)
if ckpt_path == 'specific':
ckpt_path = trainer.checkpoint_callback.best_model_path
test_options = dict(test_dataloaders=[model.dataloader(train=False),
model.dataloader(train=False)],
ckpt_path=ckpt_path)
trainer.test(**test_options)
assert len(trainer.val_dataloaders) == 2, \
f'Multiple `val_dataloaders` not initiated properly, got {trainer.val_dataloaders}'
assert len(trainer.test_dataloaders) == 2, \
f'Multiple `test_dataloaders` not initiated properly, got {trainer.test_dataloaders}'
@pytest.mark.parametrize(['limit_train_batches', 'limit_val_batches', 'limit_test_batches'], [
pytest.param(0.0, 0.0, 0.0),
pytest.param(1.0, 1.0, 1.0),
])
def test_inf_dataloaders_with_limit_percent_batches(tmpdir, limit_train_batches, limit_val_batches, limit_test_batches):
"""Verify inf train, val & test dataloaders (e.g. IterableDataset) passed with batch limit in percent"""
model = EvalModelTemplate()
model.train_dataloader = model.train_dataloader__infinite
model.val_dataloader = model.val_dataloader__infinite
model.test_dataloader = model.test_dataloader__infinite
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
limit_train_batches=limit_train_batches,
limit_val_batches=limit_val_batches,
limit_test_batches=limit_test_batches,
)
results = trainer.fit(model)
assert results == 1
assert trainer.num_training_batches == (0 if limit_train_batches == 0.0 else float('inf'))
assert trainer.num_val_batches[0] == (0 if limit_val_batches == 0.0 else float('inf'))
trainer.test(ckpt_path=None)
assert trainer.num_test_batches[0] == (0 if limit_test_batches == 0.0 else float('inf'))
@pytest.mark.parametrize(['limit_train_batches', 'limit_val_batches', 'limit_test_batches'], [
pytest.param(0, 0, 0),
pytest.param(10, 10, 10),
])
def test_inf_dataloaders_with_limit_num_batches(tmpdir, limit_train_batches, limit_val_batches, limit_test_batches):
"""Verify inf train, val & test dataloaders (e.g. IterableDataset) passed with batch limit as number"""
model = EvalModelTemplate()
model.train_dataloader = model.train_dataloader__infinite
model.val_dataloader = model.val_dataloader__infinite
model.test_dataloader = model.test_dataloader__infinite
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
limit_train_batches=limit_train_batches,
limit_val_batches=limit_val_batches,
limit_test_batches=limit_test_batches,
)
results = trainer.fit(model)
assert results
assert trainer.num_training_batches == limit_train_batches
assert trainer.num_val_batches[0] == limit_val_batches
trainer.test(ckpt_path=None)
assert trainer.num_test_batches[0] == limit_test_batches
@pytest.mark.parametrize(
['limit_train_batches', 'limit_val_batches', 'limit_test_batches'],
[
pytest.param(0.0, 0.0, 0.0),
pytest.param(0, 0, 0.5),
pytest.param(1.0, 1.0, 1.0),
pytest.param(0.2, 0.4, 0.4),
]
)
def test_dataloaders_with_limit_percent_batches(tmpdir, limit_train_batches, limit_val_batches, limit_test_batches):
"""Verify num_batches for train, val & test dataloaders passed with batch limit in percent"""
model = EvalModelTemplate()
model.val_dataloader = model.val_dataloader__multiple_mixed_length
model.test_dataloader = model.test_dataloader__multiple_mixed_length
model.validation_step = model.validation_step__multiple_dataloaders
model.validation_epoch_end = model.validation_epoch_end__multiple_dataloaders
model.test_step = model.test_step__multiple_dataloaders
model.test_epoch_end = model.test_epoch_end__multiple_dataloaders
# train, multiple val and multiple test passed with percent_check
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
limit_train_batches=limit_train_batches,
limit_val_batches=limit_val_batches,
limit_test_batches=limit_test_batches,
)
trainer.fit(model)
expected_train_batches = int(len(trainer.train_dataloader) * limit_train_batches)
expected_val_batches = [
int(len(dataloader) * limit_val_batches) for dataloader in trainer.val_dataloaders
]
assert trainer.num_training_batches == expected_train_batches
assert trainer.num_val_batches == expected_val_batches
trainer.test(ckpt_path=None)
expected_test_batches = [
int(len(dataloader) * limit_test_batches) for dataloader in trainer.test_dataloaders
]
assert trainer.num_test_batches == expected_test_batches
@pytest.mark.parametrize(
['limit_train_batches', 'limit_val_batches', 'limit_test_batches'],
[
pytest.param(0, 0, 0),
pytest.param(1, 2, 3),
pytest.param(1, 2, 1e50),
]
)
def test_dataloaders_with_limit_num_batches(tmpdir, limit_train_batches, limit_val_batches, limit_test_batches):
"""Verify num_batches for train, val & test dataloaders passed with batch limit as number"""
os.environ['PL_DEV_DEBUG'] = '1'
model = EvalModelTemplate()
model.val_dataloader = model.val_dataloader__multiple_mixed_length
model.test_dataloader = model.test_dataloader__multiple_mixed_length
model.validation_step = model.validation_step__multiple_dataloaders
model.validation_epoch_end = model.validation_epoch_end__multiple_dataloaders
model.test_step = model.test_step__multiple_dataloaders
model.test_epoch_end = model.test_epoch_end__multiple_dataloaders
# train, multiple val and multiple test passed with percent_check
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
limit_train_batches=limit_train_batches,
limit_val_batches=limit_val_batches,
limit_test_batches=limit_test_batches,
)
trainer.fit(model)
# -------------------------------------------
# MAKE SURE THE TRAINER SET THE CORRECT VALUES
# -------------------------------------------
assert trainer.num_training_batches == limit_train_batches
assert trainer.num_val_batches == [limit_val_batches] * len(trainer.val_dataloaders)
trainer.test(ckpt_path=None)
# when the limit is greater than the number of test batches it should be the num in loaders
test_dataloader_lengths = [len(x) for x in model.test_dataloader()]
if limit_test_batches > 1e10:
assert trainer.num_test_batches == test_dataloader_lengths
else:
assert trainer.num_test_batches == [limit_test_batches] * len(trainer.test_dataloaders)
# -------------------------------------------
# make sure we actually saw the expected num of batches
# -------------------------------------------
num_val_dataloaders = len(model.val_dataloader())
num_test_dataloaders = len(model.test_dataloader())
if limit_train_batches > 0:
# make sure val batches are as expected
assert len(trainer.dev_debugger.num_seen_val_check_batches) == num_val_dataloaders
for dataloader_idx, num_batches in trainer.dev_debugger.num_seen_val_check_batches.items():
assert num_batches == limit_val_batches
# make sure test batches are as expected
assert len(trainer.dev_debugger.num_seen_test_check_batches) == num_test_dataloaders
for dataloader_idx, num_batches in trainer.dev_debugger.num_seen_test_check_batches.items():
if limit_test_batches > 1e10:
assert num_batches == test_dataloader_lengths[dataloader_idx]
else:
assert num_batches == limit_test_batches
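# Note on the test above (sketch): it first checks the values the Trainer records (num_training_batches,
# num_val_batches, num_test_batches) and then, via trainer.dev_debugger, that the loops actually consumed
# exactly that many batches per dataloader, falling back to the true dataloader lengths when the requested
# limit (e.g. 1e50) exceeds them.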
def test_dataloaders_with_fast_dev_run(tmpdir):
"""Verify num_batches for train, val & test dataloaders passed with fast_dev_run = True"""
os.environ['PL_DEV_DEBUG'] = '1'
model = EvalModelTemplate()
model.val_dataloader = model.val_dataloader__multiple_mixed_length
model.test_dataloader = model.test_dataloader__multiple_mixed_length
model.validation_step = model.validation_step__multiple_dataloaders
model.validation_epoch_end = model.validation_epoch_end__multiple_dataloaders
model.test_step = model.test_step__multiple_dataloaders
model.test_epoch_end = model.test_epoch_end__multiple_dataloaders
# train, multiple val and multiple test dataloaders passed with fast_dev_run = True
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=2,
fast_dev_run=True,
)
assert trainer.max_epochs == 1
assert trainer.num_sanity_val_steps == 0
trainer.fit(model)
assert not trainer.disable_validation
assert trainer.num_training_batches == 1
assert trainer.num_val_batches == [1] * len(trainer.val_dataloaders)
trainer.test(ckpt_path=None)
assert trainer.num_test_batches == [1] * len(trainer.test_dataloaders)
# verify sanity check batches match as expected
num_val_dataloaders = len(model.val_dataloader())
assert trainer.dev_debugger.num_seen_sanity_check_batches == trainer.num_sanity_val_steps * num_val_dataloaders
@pytest.mark.parametrize('ckpt_path', [None, 'best', 'specific'])
def test_mixing_of_dataloader_options(tmpdir, ckpt_path):
"""Verify that dataloaders can be passed to fit"""
model = EvalModelTemplate()
trainer_options = dict(
default_root_dir=tmpdir,
max_epochs=1,
limit_val_batches=0.1,
limit_train_batches=0.2,
)
# fit model
trainer = Trainer(**trainer_options)
results = trainer.fit(model, val_dataloaders=model.dataloader(train=False))
assert results
# fit model
trainer = Trainer(**trainer_options)
results = trainer.fit(model, val_dataloaders=model.dataloader(train=False))
assert results
if ckpt_path == 'specific':
ckpt_path = trainer.checkpoint_callback.best_model_path
trainer.test(test_dataloaders=model.dataloader(train=False), ckpt_path=ckpt_path)
assert len(trainer.val_dataloaders) == 1, \
f'`val_dataloaders` not initiated properly, got {trainer.val_dataloaders}'
assert len(trainer.test_dataloaders) == 1, \
f'`test_dataloaders` not initiated properly, got {trainer.test_dataloaders}'
def test_train_inf_dataloader_error(tmpdir):
"""Test inf train data loader (e.g. IterableDataset)"""
model = EvalModelTemplate()
model.train_dataloader = model.train_dataloader__infinite
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, val_check_interval=0.5)
with pytest.raises(MisconfigurationException, match='using an IterableDataset'):
trainer.fit(model)
def test_val_inf_dataloader_error(tmpdir):
"""Test inf train data loader (e.g. IterableDataset)"""
model = EvalModelTemplate()
model.val_dataloader = model.val_dataloader__infinite
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, limit_val_batches=0.5)
with pytest.raises(MisconfigurationException, match='using an IterableDataset'):
trainer.fit(model)
def test_test_inf_dataloader_error(tmpdir):
"""Test inf train data loader (e.g. IterableDataset)"""
model = EvalModelTemplate()
model.test_dataloader = model.test_dataloader__infinite
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, limit_test_batches=0.5)
with pytest.raises(MisconfigurationException, match='using an IterableDataset'):
trainer.test(model)
@pytest.mark.parametrize('check_interval', [50, 1.0])
def test_inf_train_dataloader(tmpdir, check_interval):
"""Test inf train data loader (e.g. IterableDataset)"""
model = EvalModelTemplate()
model.train_dataloader = model.train_dataloader__infinite
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
val_check_interval=check_interval,
)
result = trainer.fit(model)
# verify training completed
assert result == 1
@pytest.mark.parametrize('check_interval', [1.0])
def test_inf_val_dataloader(tmpdir, check_interval):
"""Test inf val data loader (e.g. IterableDataset)"""
model = EvalModelTemplate()
model.val_dataloader = model.val_dataloader__infinite
# logger file to get meta
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
val_check_interval=check_interval,
)
result = trainer.fit(model)
# verify training completed
assert result == 1
def test_error_on_zero_len_dataloader(tmpdir):
""" Test that error is raised if a zero-length dataloader is defined """
model = EvalModelTemplate()
model.train_dataloader = model.train_dataloader__zero_length
# fit model
with pytest.raises(ValueError):
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
limit_train_batches=0.1,
limit_val_batches=0.1,
limit_test_batches=0.1,
)
trainer.fit(model)
@pytest.mark.skipif(platform.system() == 'Windows', reason='Does not apply to Windows platform.')
@pytest.mark.parametrize('ckpt_path', [None, 'best', 'specific'])
@patch('pytorch_lightning.trainer.data_loading.multiprocessing.cpu_count', return_value=4)
def test_warning_with_few_workers(mock, tmpdir, ckpt_path):
""" Test that error is raised if dataloader with only a few workers is used """
model = EvalModelTemplate()
# logger file to get meta
train_dl = model.dataloader(train=True)
train_dl.num_workers = 0
val_dl = model.dataloader(train=False)
val_dl.num_workers = 0
train_dl = model.dataloader(train=False)
train_dl.num_workers = 0
fit_options = dict(train_dataloader=train_dl,
val_dataloaders=val_dl)
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
limit_val_batches=0.1,
limit_train_batches=0.2,
)
# fit model
with pytest.warns(
UserWarning, match='The dataloader, train dataloader, does not have many workers which may be a bottleneck.'
):
trainer.fit(model, **fit_options)
with pytest.warns(
UserWarning, match='The dataloader, val dataloader 0, does not have many workers which may be a bottleneck.'
):
trainer.fit(model, **fit_options)
if ckpt_path == 'specific':
ckpt_path = trainer.checkpoint_callback.best_model_path
test_options = dict(test_dataloaders=train_dl, ckpt_path=ckpt_path)
with pytest.warns(
UserWarning, match='The dataloader, test dataloader 0, does not have many workers which may be a bottleneck.'
):
trainer.test(**test_options)
@pytest.mark.xfail(
LooseVersion(torch.__version__) < LooseVersion("1.4.0"),
reason="IterableDataset with __len__ before 1.4 raises",
)
def test_warning_with_iterable_dataset_and_len(tmpdir):
""" Tests that a warning message is shown when an IterableDataset defines `__len__`. """
model = EvalModelTemplate()
original_dataset = model.train_dataloader().dataset
class IterableWithLen(IterableDataset):
def __iter__(self):
return iter(original_dataset)
def __len__(self):
return len(original_dataset)
dataloader = DataLoader(IterableWithLen(), batch_size=16)
assert has_len(dataloader)
assert has_iterable_dataset(dataloader)
trainer = Trainer(
default_root_dir=tmpdir,
max_steps=3,
)
with pytest.warns(UserWarning, match='Your `IterableDataset` has `__len__` defined.'):
trainer.fit(model, train_dataloader=dataloader, val_dataloaders=[dataloader])
with pytest.warns(UserWarning, match='Your `IterableDataset` has `__len__` defined.'):
trainer.test(model, test_dataloaders=[dataloader])
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason='Test requires multiple GPUs')
def test_dataloader_reinit_for_subclass(tmpdir):
class CustomDataLoader(torch.utils.data.DataLoader):
def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None,
batch_sampler=None, num_workers=0, collate_fn=None,
pin_memory=False, drop_last=False, timeout=0,
worker_init_fn=None, dummy_kwarg=None, **kwargs):
super().__init__(dataset, batch_size, shuffle, sampler, batch_sampler,
num_workers, collate_fn, pin_memory, drop_last, timeout,
worker_init_fn)
self.dummy_kwarg = dummy_kwarg
trainer = Trainer(
gpus=[0, 1],
num_nodes=1,
distributed_backend='ddp_spawn',
default_root_dir=tmpdir,
)
class CustomDummyObj:
sampler = None
result = trainer.auto_add_sampler(CustomDummyObj(), shuffle=True)
assert isinstance(result, CustomDummyObj), "Wrongly reinstantiated data loader"
dataset = list(range(1000))
result = trainer.auto_add_sampler(CustomDataLoader(dataset), shuffle=True)
assert isinstance(result, torch.utils.data.DataLoader)
assert isinstance(result, CustomDataLoader)
assert hasattr(result, 'dummy_kwarg')
# Shuffled DataLoader should also work
result = trainer.auto_add_sampler(CustomDataLoader(list(range(1000)), shuffle=True), shuffle=True)
assert isinstance(result, torch.utils.data.DataLoader)
assert isinstance(result, CustomDataLoader)
assert hasattr(result, 'dummy_kwarg')
class CustomSampler(torch.utils.data.Sampler):
pass
# Should raise an error if existing sampler is being replaced
with pytest.raises(MisconfigurationException, match='DistributedSampler'):
trainer.auto_add_sampler(
CustomDataLoader(list(range(1000)), sampler=CustomSampler(list(range(1000)))), shuffle=True)
class DistribSamplerCallback(Callback):
def on_train_start(self, trainer, pl_module):
train_sampler = trainer.train_dataloader.sampler
assert isinstance(train_sampler, DistributedSampler)
assert train_sampler.shuffle
def on_validation_start(self, trainer, pl_module):
val_sampler = trainer.val_dataloaders[0].sampler
assert isinstance(val_sampler, DistributedSampler)
assert not val_sampler.shuffle
def on_test_start(self, trainer, pl_module):
test_sampler = trainer.test_dataloaders[0].sampler
assert isinstance(test_sampler, DistributedSampler)
assert not test_sampler.shuffle
@pytest.mark.skipif(platform.system() == 'Windows', reason='Does not apply to Windows platform.')
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason='Test requires multiple GPUs')
def test_dataloader_distributed_sampler(tmpdir):
""" Test DistributedSampler and it's arguments for DDP backend """
model = EvalModelTemplate()
trainer = Trainer(
gpus=[0, 1],
num_nodes=1,
distributed_backend='ddp_spawn',
default_root_dir=tmpdir,
max_steps=1,
callbacks=[DistribSamplerCallback()]
)
trainer.fit(model)
trainer.test(ckpt_path=None)
class ModelWithDataLoaderDistributedSampler(EvalModelTemplate):
def train_dataloader(self):
dataloader = super().train_dataloader()
dist_sampler = DistributedSampler(dataloader.dataset, shuffle=True)
return DataLoader(
dataloader.dataset,
batch_size=self.batch_size,
drop_last=False,
sampler=dist_sampler,
shuffle=False
)
@pytest.mark.skipif(platform.system() == 'Windows', reason='Does not apply to Windows platform.')
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason='Test requires multiple GPUs')
def test_dataloader_distributed_sampler_already_attached(tmpdir):
""" Test DistributedSampler and it's arguments for DDP backend when DistSampler already included on dataloader """
model = ModelWithDataLoaderDistributedSampler()
trainer = Trainer(
gpus=[0, 1],
num_nodes=1,
distributed_backend='ddp_spawn',
default_root_dir=tmpdir,
max_steps=100,
callbacks=[DistribSamplerCallback()],
replace_sampler_ddp=True,
)
result = trainer.fit(model)
assert result == 1, "DDP Training failed"
@pytest.mark.skipif(torch.cuda.device_count() < 3, reason='Test requires multiple GPUs')
def test_batch_size_smaller_than_num_gpus(tmpdir):
# we need at least 3 gpus for this test
num_gpus = 3
batch_size = 3
class CurrentTestModel(EvalModelTemplate):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# batch norm doesn't work with batch size 1, we replace it
self.c_d1_bn = torch.nn.ReLU()
def training_step(self, *args, **kwargs):
output = super().training_step(*args, **kwargs)
loss = output['loss']
# we make sure to add some metrics to the output dict,
# this is essential for this test
output['progress_bar'] = {'train_loss': loss}
return output
def train_dataloader(self):
dataloader = super().train_dataloader()
# construct a dataset with a size that is not divisible by num_gpus
# therefore the last batch will have a size < num_gpus
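# (e.g. with num_gpus=3 and batch_size=3 this gives 3 * 3 + 2 = 11 samples, so the last batch holds only 2)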
size = num_gpus * batch_size + (num_gpus - 1)
dataset = Subset(dataloader.dataset, range(size))
dataloader = DataLoader(
dataset,
batch_size=self.batch_size,
drop_last=False,
)
return dataloader
hparams = EvalModelTemplate.get_default_hparams()
hparams['batch_size'] = batch_size
model = CurrentTestModel(**hparams)
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
limit_train_batches=0.1,
limit_val_batches=0,
gpus=num_gpus,
)
# we expect the reduction for the metrics also to happen on the last batch
# where we will get fewer metrics than gpus
result = trainer.fit(model)
assert 1 == result
@pytest.mark.parametrize('check_interval', [1.0])
def test_val_dataloader_not_implemented_error(tmpdir, check_interval):
"""Test not_implemented_error data loader (e.g. IterableDataset)"""
model = EvalModelTemplate()
model.val_dataloader = model.val_dataloader__not_implemented_error
# logger file to get meta
trainer = Trainer(
default_root_dir=tmpdir,
max_steps=5,
max_epochs=1,
val_check_interval=check_interval,
)
result = trainer.fit(model)
# verify training completed
assert result == 1
@pytest.mark.parametrize('check_interval', [50, 1.0])
def test_train_dataloader_not_implemented_error(tmpdir, check_interval):
"""Test not_implemented_error train data loader (e.g. IterableDataset)"""
model = EvalModelTemplate()
model.train_dataloader = model.train_dataloader__not_implemented_error
model.val_dataloader = model.val_dataloader__not_implemented_error
trainer = Trainer(
default_root_dir=tmpdir,
max_steps=5,
max_epochs=1,
val_check_interval=check_interval
)
result = trainer.fit(model)
# verify training completed
assert result == 1
def test_train_dataloader_not_implemented_error_failed(tmpdir):
"""Test not_implemented_error train data loader (e.g. IterableDataset)"""
model = EvalModelTemplate()
model.train_dataloader = model.train_dataloader__not_implemented_error
trainer = Trainer(default_root_dir=tmpdir, max_steps=5, max_epochs=1, val_check_interval=0.5)
with pytest.raises(MisconfigurationException, match='using an IterableDataset'):
trainer.fit(model)
def test_val_dataloader_not_implemented_error_failed(tmpdir):
"""Test not_implemented_error train data loader (e.g. IterableDataset)"""
model = EvalModelTemplate()
model.val_dataloader = model.val_dataloader__not_implemented_error
trainer = Trainer(default_root_dir=tmpdir, max_steps=5, max_epochs=1, limit_val_batches=0.5)
with pytest.raises(MisconfigurationException, match='using an IterableDataset'):
trainer.fit(model)
def test_test_dataloader_not_implemented_error_failed(tmpdir):
"""Test not_implemented_error train data loader (e.g. IterableDataset)"""
model = EvalModelTemplate()
model.test_dataloader = model.test_dataloader__not_implemented_error
trainer = Trainer(default_root_dir=tmpdir, max_steps=5, max_epochs=1, limit_test_batches=0.5)
with pytest.raises(MisconfigurationException, match='using an IterableDataset'):
trainer.test(model)
def test_dataloaders_load_only_once(tmpdir):
os.environ['PL_DEV_DEBUG'] = '1'
model = EvalModelTemplate()
# logger file to get meta
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=0.3,
limit_val_batches=0.3,
max_epochs=3,
)
result = trainer.fit(model)
assert len(trainer.dev_debugger.val_dataloader_calls) == 1
assert len(trainer.dev_debugger.test_dataloader_calls) == 0
assert len(trainer.dev_debugger.train_dataloader_calls) == 1
# verify the sequence
calls = trainer.dev_debugger.dataloader_sequence_calls
expected_sequence = [
'val_dataloader',
'train_dataloader',
]
for call, expected in zip(calls, expected_sequence):
assert call['name'] == expected
def test_dataloaders_load_only_once_val_interval(tmpdir):
os.environ['PL_DEV_DEBUG'] = '1'
model = EvalModelTemplate()
# logger file to get meta
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=10,
limit_val_batches=10,
val_check_interval=0.3,
reload_dataloaders_every_epoch=True,
max_epochs=3,
)
result = trainer.fit(model)
trainer.test()
assert len(trainer.dev_debugger.val_dataloader_calls) == 10
assert len(trainer.dev_debugger.test_dataloader_calls) == 1
assert len(trainer.dev_debugger.train_dataloader_calls) == 3
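# with reload_dataloaders_every_epoch=True the val loader is rebuilt for each of the
# 3 validation runs per epoch (val_check_interval=0.3) plus once for the sanity check:
# 1 + 3 * 3 = 10 calls; the train loader is rebuilt once per epoch (3 calls)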
# verify the sequence
calls = trainer.dev_debugger.dataloader_sequence_calls
expected_sequence = [
'val_dataloader',
'train_dataloader',
'val_dataloader',
'val_dataloader',
'val_dataloader',
'train_dataloader',
'val_dataloader',
'val_dataloader',
'val_dataloader',
'train_dataloader',
'val_dataloader',
'val_dataloader',
'val_dataloader',
'test_dataloader'
]
for call, expected in zip(calls, expected_sequence):
assert call['name'] == expected
def test_dataloaders_load_only_once_no_sanity_check(tmpdir):
os.environ['PL_DEV_DEBUG'] = '1'
model = EvalModelTemplate()
# logger file to get meta
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=0.3,
limit_val_batches=0.3,
num_sanity_val_steps=0,
max_epochs=3,
)
result = trainer.fit(model)
assert len(trainer.dev_debugger.val_dataloader_calls) == 1
assert len(trainer.dev_debugger.test_dataloader_calls) == 0
assert len(trainer.dev_debugger.train_dataloader_calls) == 1
# verify the sequence
calls = trainer.dev_debugger.dataloader_sequence_calls
expected_sequence = [
'train_dataloader',
'val_dataloader',
]
for call, expected in zip(calls, expected_sequence):
assert call['name'] == expected
def test_dataloaders_load_every_epoch(tmpdir):
os.environ['PL_DEV_DEBUG'] = '1'
model = EvalModelTemplate()
# logger file to get meta
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=0.3,
limit_val_batches=0.3,
reload_dataloaders_every_epoch=True,
max_epochs=3,
)
result = trainer.fit(model)
trainer.test()
assert len(trainer.dev_debugger.val_dataloader_calls) == 4
assert len(trainer.dev_debugger.train_dataloader_calls) == 3
assert len(trainer.dev_debugger.test_dataloader_calls) == 1
# verify the sequence
calls = trainer.dev_debugger.dataloader_sequence_calls
expected_sequence = [
'val_dataloader',
'train_dataloader',
'val_dataloader',
'train_dataloader',
'val_dataloader',
'train_dataloader',
'val_dataloader',
'test_dataloader'
]
for call, expected in zip(calls, expected_sequence):
assert call['name'] == expected
def test_dataloaders_load_every_epoch_no_sanity_check(tmpdir):
os.environ['PL_DEV_DEBUG'] = '1'
model = EvalModelTemplate()
# logger file to get meta
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=0.3,
limit_val_batches=0.3,
num_sanity_val_steps=0,
reload_dataloaders_every_epoch=True,
max_epochs=3,
)
result = trainer.fit(model)
trainer.test()
assert len(trainer.dev_debugger.val_dataloader_calls) == 3
assert len(trainer.dev_debugger.train_dataloader_calls) == 3
assert len(trainer.dev_debugger.test_dataloader_calls) == 1
# verify the sequence
calls = trainer.dev_debugger.dataloader_sequence_calls
expected_sequence = [
'train_dataloader',
'val_dataloader',
'train_dataloader',
'val_dataloader',
'train_dataloader',
'val_dataloader',
'test_dataloader'
]
for call, expected in zip(calls, expected_sequence):
assert call['name'] == expected
def test_dataloaders_load_only_once_passed_loaders(tmpdir):
os.environ['PL_DEV_DEBUG'] = '1'
model = EvalModelTemplate()
train_loader = model.train_dataloader()
model.train_dataloader = None
val_loader = model.val_dataloader()
model.val_dataloader = None
test_loader = model.test_dataloader()
model.test_dataloader = None
# logger file to get meta
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=0.3,
limit_val_batches=0.3,
max_epochs=3,
)
result = trainer.fit(model, train_loader, val_loader)
trainer.test(test_dataloaders=test_loader)
assert len(trainer.dev_debugger.val_dataloader_calls) == 1
assert len(trainer.dev_debugger.test_dataloader_calls) == 1
assert len(trainer.dev_debugger.train_dataloader_calls) == 1
# verify the sequence
calls = trainer.dev_debugger.dataloader_sequence_calls
expected_sequence = [
'val_dataloader',
'train_dataloader',
]
for call, expected in zip(calls, expected_sequence):
assert call['name'] == expected
| [
"torch.nn.ReLU",
"torch.utils.data.dataloader.DataLoader",
"torch.utils.data.distributed.DistributedSampler",
"torch.cuda.device_count"
] | 1.3 | wdmwhh/pytorch-lightning | 4018237c309b7d9d6978da73132003615341e04a |
1.4 | import os
import json
import numpy as np
import torch
import torch.nn.functional as F
from torch.optim import Adam
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import matplotlib
import matplotlib.pyplot as plt
import warnings
from mode_disent_no_ssm.utils.skill_policy_wrapper import DiaynSkillPolicyWrapper
from mode_disent.memory.memory import MyLazyMemory
from mode_disent.env_wrappers.rlkit_wrapper import NormalizedBoxEnvForPytorch
from mode_disent.utils.mmd import compute_mmd_tutorial
from code_slac.utils import calc_kl_divergence, update_params
from mode_disent_no_ssm.network.mode_model import ModeLatentNetwork
from mode_disent_no_ssm.utils.empty_network import Empty
from mode_disent_no_ssm.utils.parse_args import json_save
from mode_disent_no_ssm.test.action_sampler import ActionSamplerNoSSM
matplotlib.use('Agg')
class DisentTrainerNoSSM:
def __init__(self,
env: NormalizedBoxEnvForPytorch,
log_dir,
min_steps_sampling,
batch_size,
num_sequences,
train_steps,
lr,
mode_dim,
num_mode_repitions,
rnn_dim,
num_rnn_layers,
rnn_dropout,
hidden_units_mode_encoder,
std_decoder,
mode_latent_model: ModeLatentNetwork,
hidden_units_action_decoder,
memory_size,
skill_policy: DiaynSkillPolicyWrapper,
log_interval,
info_loss_params,
run_id,
run_hp,
params_for_testing,
device,
leaky_slope=0.2,
seed=0,
):
self.env = env
self.observation_shape = self.env.observation_space.shape
self.action_shape = self.env.action_space.shape
self.feature_dim = int(self.observation_shape[0])
self.mode_dim = mode_dim
self.num_sequences = num_sequences
self.min_steps_sampling = min_steps_sampling
self.batch_size = batch_size
self.train_steps = train_steps
self.run_id = run_id
self.learn_steps = 0
self.episodes = 0
self.seed = None
self._set_seed(seed)
self.device = None
self._set_device(device)
if self.observation_shape[0] == self.feature_dim:
self.obs_encoder = Empty().to(self.device)
else:
self.obs_encoder = torch.nn.Linear(self.observation_shape[0],
self.feature_dim)
if mode_latent_model is None:
self.mode_latent_model = ModeLatentNetwork(
mode_dim=self.mode_dim,
representation_dim=self.feature_dim,
rnn_dim=rnn_dim,
num_rnn_layers=num_rnn_layers,
rnn_dropout=rnn_dropout,
hidden_units_mode_encoder=hidden_units_mode_encoder,
hidden_units_action_decoder=hidden_units_action_decoder,
num_mode_repeat=num_mode_repitions,
feature_dim=self.feature_dim,
action_dim=self.action_shape[0],
std_decoder=std_decoder,
device=self.device,
leaky_slope=leaky_slope,
).to(self.device)
self.mode_model_loaded = False
else:
self.mode_latent_model = mode_latent_model.to(self.device)
self.mode_model_loaded = True
self.optim = Adam(self.mode_latent_model.parameters(), lr=lr)
self.memory = MyLazyMemory(
state_rep=True,
capacity=memory_size,
num_sequences=self.num_sequences,
observation_shape=self.observation_shape,
action_shape=self.action_shape,
device=self.device
)
self.log_dir = log_dir
self.model_dir = os.path.join(self.log_dir, 'model', str(self.run_id))
self.summary_dir = os.path.join(self.log_dir, 'summary', str(self.run_id))
if not os.path.exists(self.model_dir):
os.makedirs(self.model_dir)
if not os.path.exists(self.summary_dir):
os.makedirs(self.summary_dir)
testparams_save_path = os.path.join(self.model_dir, 'parameters_for_testing.json')
json_save(params_for_testing, testparams_save_path)
run_hp_save_path = os.path.join(self.model_dir, 'run_hyperparameters.json')
json_save(run_hp, run_hp_save_path)
self.writer = SummaryWriter(log_dir=self.summary_dir)
self.log_interval = log_interval
self.skill_policy = skill_policy
self.info_loss_params = info_loss_params
self.num_skills = self.skill_policy.num_skills
self.steps = np.zeros(shape=self.num_skills, dtype=np.int)
def run_training(self):
self._sample_sequences(
memory_to_fill=self.memory,
min_steps=self.min_steps_sampling,
step_cnt=self.steps
)
self._train()
self._save_models()
def _train(self):
for _ in tqdm(range(self.train_steps)):
self._learn_step()
if self._is_interval(self.log_interval * 25, self.learn_steps):
self._save_models()
def _learn_step(self):
sequences = self.memory.sample_sequence(self.batch_size)
loss = self._calc_loss(sequences)
update_params(self.optim, self.mode_latent_model, loss)
self.learn_steps += 1
def _calc_loss(self, sequence):
actions_seq = sequence['actions_seq']
features_seq = self.obs_encoder(sequence['states_seq'])
skill_seq = sequence['skill_seq']
skill_seq_np_squeezed =\
self._tensor_to_numpy(skill_seq.float().mean(dim=1)) \
.astype(np.uint8).squeeze()
# Posterior and prior
mode_post = self.mode_latent_model.sample_mode_posterior(features_seq=features_seq)
mode_pri = self.mode_latent_model.sample_mode_prior(self.batch_size)
# KLD
kld = calc_kl_divergence([mode_post['dists']],
[mode_pri['dists']])
# MMD
mmd = compute_mmd_tutorial(mode_pri['samples'],
mode_post['samples'])
# Reconstruction
actions_seq_recon = self.mode_latent_model.action_decoder(
state_rep_seq=features_seq[:, :-1, :],
mode_sample=mode_post['samples']
)
# Reconstruction loss
ll = actions_seq_recon['dists'].log_prob(actions_seq).mean(dim=0).sum()
mse = F.mse_loss(actions_seq_recon['samples'], actions_seq)
# Classic beta-VAE loss
beta = 1.
classic_loss = beta * kld - ll
# Info-VAE loss
alpha = self.info_loss_params.alpha
lamda = self.info_loss_params.lamda
kld_info = (1 - alpha) * kld
mmd_info = (alpha + lamda - 1) * mmd
info_loss = mse + kld_info + mmd_info
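# Note: with alpha=0 and lamda=1 the MMD term vanishes and this reduces to the plain
# reconstruction-plus-KLD objective; increasing lamda puts more weight on the MMD term,
# while alpha trades KLD weight against MMD weight (InfoVAE-style weighting).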
if self.info_loss_params.kld_diff_desired is not None:
kld_desired_scalar = self.info_loss_params.kld_diff_desired
kld_desired = torch.tensor(kld_desired_scalar).to(self.device)
kld_diff_control = 0.07 * F.mse_loss(kld_desired, kld)
info_loss += kld_diff_control
# Logging
base_str_stats = 'Mode Model stats/'
with torch.no_grad():
base_str_info = 'Mode Model info-vae/'
base_str_mode_map = 'Mode Model/'
if self._is_interval(self.log_interval, self.learn_steps):
self._summary_log_mode(base_str_stats + 'log-likelihood', ll)
self._summary_log_mode(base_str_stats + 'mse', mse)
self._summary_log_mode(base_str_stats + 'kld', kld)
self._summary_log_mode(base_str_stats + 'mmd', mmd)
self._summary_log_mode(base_str_info + 'kld info-weighted', kld_info)
self._summary_log_mode(base_str_info + 'mmd info weighted', mmd_info)
self._summary_log_mode(
base_str_info + 'loss on latent', mmd_info + kld_info)
mode_map_fig = self._plot_mode_map(
skill_seq=skill_seq,
mode_post_samples=mode_post['samples']
)
self._save_fig(
locations=['writer'],
fig=mode_map_fig,
base_str=base_str_mode_map + 'Mode Map'
)
base_str_recon = 'Mode Model Action Reconstruction'
base_str_states_features = 'Mode Model States Features'
if self._is_interval(self.log_interval, self.learn_steps):
rand_batch_idx = np.random.randint(0, self.batch_size)
fig_actions = self._plot_recon_comparison(
action_seq=actions_seq[rand_batch_idx],
action_seq_recon=actions_seq_recon['samples'][rand_batch_idx],
skill=skill_seq_np_squeezed[rand_batch_idx]
)
self._save_fig(fig_actions, ['writer'],
base_str=base_str_recon)
fig_states = self._plot_state_features(
states_seq=sequence['states_seq'][rand_batch_idx, :-1, :],
features_seq=features_seq[rand_batch_idx, :-1, :],
skill=skill_seq_np_squeezed[rand_batch_idx]
)
self._save_fig(fig_states, ['writer'],
base_str=base_str_states_features)
if self._is_interval(self.log_interval * 20, self.learn_steps) and self.env.observation_space.shape[0] < 4:
self._test_mode_influence(mode_post_samples=mode_post['samples'])
return info_loss
def _sample_sequences(self,
memory_to_fill: MyLazyMemory,
min_steps,
step_cnt: np.ndarray):
skill = 0
while np.sum(step_cnt) < min_steps:
self._sample_equal_skill_dist(memory=memory_to_fill,
skill=skill,
step_cnt=step_cnt)
skill = min(skill + 1, (skill + 1) % self.num_skills)
self.episodes += 1
print(self.steps)
memory_to_fill.skill_histogram(writer=self.writer)
def _sample_equal_skill_dist(self,
memory: MyLazyMemory,
skill,
step_cnt: np.ndarray):
episode_steps = 0
self.skill_policy.set_skill(skill)
obs = self.env.reset()
memory.set_initial_state(obs)
next_state = obs
done = False
while self.steps[skill] <= np.max(self.steps):
if done:
next_state = self.env.reset()
action = self.skill_policy.get_action(
obs_denormalized=self.env.denormalize(next_state)
if self.env.state_normalization else next_state
)
next_state, reward, done, _ = self.env.step(action)
episode_steps += 1
step_cnt[skill] += 1
seq_pushed = memory.append(action=action,
skill=np.array([skill], dtype=np.uint8),
state=next_state,
done=np.array([done], dtype=np.bool))
if seq_pushed:
break
print(f'episode: {self.episodes:<4} '
f'episode_steps: {episode_steps:<4} '
f'skill: {skill: <4} ')
def _plot_mode_map(self,
skill_seq,
mode_post_samples,
):
"""
Args:
skill_seq : (N, S, 1) - tensor
mode_post_samples : (N, 2) - tensor
"""
plot_dim = 2
if self.mode_dim != plot_dim:
warnings.warn(f'Mode dimension is not equal to {plot_dim:<2}; '
f'no mode map is plotted')
return None
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k',
'darkorange', 'gray', 'lightgreen']
if self.num_skills > len(colors):
raise ValueError(f'No more than {len(colors):<3} '
f'skills are supported for mode '
f'plotting right now (more colors needed)')
assert mode_post_samples.shape == torch.Size((self.batch_size, plot_dim))
skill_seq = self._tensor_to_numpy(skill_seq.float().mean(dim=1))\
.astype(np.uint8).squeeze()
mode_post_samples = self._tensor_to_numpy(mode_post_samples)
plt.interactive(False)
_, axes = plt.subplots()
lim = [-3., 3.]
axes.set_ylim(lim)
axes.set_xlim(lim)
for skill in range(skill_seq.max() + 1):
bool_idx = skill_seq == skill
plt.scatter(mode_post_samples[bool_idx, 0],
mode_post_samples[bool_idx, 1],
label=skill,
c=colors[skill])
axes.legend()
axes.grid(True)
fig = plt.gcf()
return fig
def _plot_recon_comparison(self,
action_seq,
action_seq_recon,
skill: int,
):
"""
Args:
action_seq : (S, action_dim)
action_seq_recon : (S, action_dim)
Return:
figure
"""
action_dim = self.action_shape[0]
assert action_seq.size(1) == action_seq_recon.size(1) == action_dim
assert action_seq.size(0) == action_seq_recon.size(0) \
== self.num_sequences
action_seq = self._tensor_to_numpy(action_seq)
action_seq_recon = self._tensor_to_numpy(action_seq_recon)
plt.interactive(False)
_, axes1 = plt.subplots()
lim = [-1.3, 1.3]
axes1.set_ylim(lim)
plt.title(f'Skill: {skill:<3}')
for dim in range(action_dim):
plt.plot(action_seq[:, dim], label=f'real action dim {dim:<2}')
plt.plot(action_seq_recon[:, dim], label=f'recon action dim {dim:<2}')
plt.legend()
fig = plt.gcf()
return fig
def _plot_state_features(self,
states_seq,
features_seq,
skill: int,
):
"""
Args:
states_seq : (S, obs_dim)
features_seq : (S, feature_dim)
Return:
figure
"""
obs_dim = self.observation_shape[0]
feature_dim = self.feature_dim
assert states_seq.size(0) == features_seq.size(0) == self.num_sequences
assert features_seq.size(1) == feature_dim
assert states_seq.size(1) == obs_dim
states_seq = self._tensor_to_numpy(states_seq)
features_seq = self._tensor_to_numpy(features_seq)
plt.interactive(False)
_, axes = plt.subplots()
lim = [-1, 1]
axes.set_ylim(lim)
plt.title(f'Skill: {skill:<3}')
for dim in range(obs_dim):
plt.plot(states_seq[:, dim], label=f'state dim {dim:<2}')
for dim in range(feature_dim):
plt.plot(features_seq[:, dim], label=f'features dim {dim:<2}')
plt.legend()
fig = plt.gcf()
return fig
def _test_mode_influence(self, mode_post_samples, seq_len=250):
with torch.no_grad():
mode_action_sampler = ActionSamplerNoSSM(
mode_model=self.mode_latent_model,
device=self.device
)
if self.mode_dim != 2:
modes = mode_post_samples[:10]
else:
modes = self._create_grid()
for mode in modes:
mode_action_sampler.reset(mode=mode.unsqueeze(0))
obs = self.env.reset()
action_save = []
obs_save = []
for _ in range(seq_len):
obs_tensor = torch.from_numpy(obs.astype(np.float))\
.unsqueeze(0).to(self.device).float()
action_tensor = mode_action_sampler(
state_rep=self.obs_encoder(obs_tensor)
)
action = action_tensor.squeeze().detach().cpu().numpy()
obs, _, done, _ = self.env.step(action)
action_save.append(action)
assert obs.shape == self.env.observation_space.shape
obs_save.append(obs)
actions = np.stack(action_save, axis=0)
obs = np.stack(obs_save, axis=0)
plt.interactive(False)
ax = plt.gca()
ax.set_ylim([-3.5, 3.5])
plt.plot(actions, label='actions')
for dim in range(obs.shape[1]):
plt.plot(obs[:, dim], label='state_dim' + str(dim))
plt.legend()
fig = plt.gcf()
self.writer.add_figure('mode_grid_plot_test/mode' + str(mode),
figure=fig,
global_step=self.learn_steps)
def _create_grid(self):
min_val = -1.7
max_val = -min_val
num_steps = 4
grid_vec = torch.linspace(min_val, max_val, num_steps)
grid_vec_list = [grid_vec] * self.mode_dim
grid = torch.meshgrid(*grid_vec_list)
modes = torch.stack(list(grid)).view(self.mode_dim, -1) \
.transpose(0, -1).to(self.device)
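# modes has shape (num_steps ** mode_dim, mode_dim), e.g. (16, 2) for a 2-dimensional mode space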
return modes
def _set_seed(self, seed):
self.seed = seed
torch.manual_seed(self.seed)
np.random.seed(self.seed)
self.env.seed(self.seed)
def _set_device(self, device_str):
self.device = torch.device(
device_str if torch.cuda.is_available() else "cpu"
)
print("device set to " + str(self.device))
def _save_models(self):
# Models
path_name_mode = os.path.join(self.model_dir, 'mode_model.pkl')
path_name_obs_encoder = os.path.join(self.model_dir, 'obs_encoder.pkl')
torch.save(self.mode_latent_model, path_name_mode)
torch.save(self.obs_encoder, path_name_obs_encoder)
# Mode mapping fig
sequence = self.memory.sample_sequence(self.batch_size)
features_seq = self.obs_encoder(sequence['states_seq'])
fig = self._plot_mode_map(
sequence['skill_seq'],
self.mode_latent_model.sample_mode_posterior(features_seq)['samples']
)
self._save_fig(fig,
locations=['file'],
base_str='mode_mapping.fig')
def _summary_log_mode(self, data_name, data):
if type(data) == torch.Tensor:
data = data.detach().cpu().item()
self.writer.add_scalar(data_name, data, self.learn_steps)
@staticmethod
def _is_interval(log_interval, steps):
return steps % log_interval == 0
@staticmethod
def _tensor_to_numpy(tensor: torch.Tensor):
return tensor.detach().cpu().numpy()
def _numpy_to_tensor(self, nd_array: np.ndarray):
return torch.from_numpy(nd_array).to(self.device)
def _save_fig(self, fig, locations: list, base_str):
for loc in locations:
if loc == 'writer':
self.writer.add_figure(base_str + 'mode mapping',
fig,
global_step=self.learn_steps)
elif loc == 'file':
path_name_fig = os.path.join(self.model_dir, 'mode_mapping.fig')
torch.save(obj=fig, f=path_name_fig)
else:
raise NotImplementedError(f'Location {loc} is not implemented')
plt.clf()
| [
"torch.nn.Linear",
"torch.cuda.is_available",
"torch.meshgrid",
"torch.Size",
"torch.manual_seed",
"torch.tensor",
"torch.utils.tensorboard.SummaryWriter",
"torch.save",
"torch.linspace",
"torch.no_grad",
"torch.from_numpy",
"torch.nn.functional.mse_loss"
] | 1.4.0 | fgitmichael/AutoregressiveModeDisentangling | c556b2384ba90e87acd1358d5aae34c3cf477ae5 |
1.8 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch
from .base_grad_scaler import BaseGradScaler
__all__ = ['DynamicGradScaler']
class DynamicGradScaler(BaseGradScaler):
def __init__(self,
initial_scale: int = 2**16,
growth_factor: int = 2,
backoff_factor: float = 0.5,
growth_interval: int = 1000,
min_scale: int = None,
max_scale: int = None,
hysteresis: int = None,
verbose: bool = False):
super().__init__(initial_scale, verbose)
self._min_scale = min_scale
self._max_scale = max_scale
self._growth_factor = growth_factor
self._backoff_factor = backoff_factor
self._growth_interval = growth_interval
self._growth_step = 0
self._hysteresis = hysteresis
self._hysteresis_step = 0
self._sanity_checks()
def _sanity_checks(self) -> None:
if self._min_scale:
assert self._min_scale > 0, 'The minimum gradient scale cannot be zero or negative'
if self._max_scale:
assert self._max_scale > 0, 'The maximum gradient scale cannot be zero or negative'
assert self._growth_factor > 1, 'The growth factor cannot be equal or smaller than 1'
assert self._backoff_factor < 1 and self._backoff_factor > 0, 'The backoff factor must be between 0 and 1'
assert self._hysteresis >= 0, 'The hysteresis cannot be negative'
def update(self, overflow: bool) -> None:
if overflow:
self._hysteresis_step += 1
self._growth_step = 0
if self._hysteresis_step >= self._hysteresis:
self._backoff_scale()
self.log(f"Overflow occurs, the loss scale is adjusted to {self.scale.item()}", ranks=[0])
else:
self._growth_step += 1
if self._growth_step == self._growth_interval:
self._growth_step = 0
self._hysteresis_step = 0
self._grow_scale()
self.log(
f"No overflow for consecutive {self._growth_interval} steps, "
f"the loss scale is adjusted to {self.scale.item()}",
ranks=[0])
def _backoff_scale(self) -> None:
self._scale = self._scale * self._backoff_factor
if self._min_scale:
self._scale = torch.max(self._scale, self._min_scale)
def _grow_scale(self) -> None:
self._scale = self._scale * self._growth_factor
if self._max_scale:
self._scale = torch.min(self._scale, self._max_scale)
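# A minimal usage sketch (illustrative only; the training-loop helpers `compute_loss`,
# `grads_have_overflow` and `unscale_and_step` are hypothetical, and `scaler.scale` is
# assumed to be the scale tensor exposed by BaseGradScaler):
#
#   scaler = DynamicGradScaler(initial_scale=2**16, growth_interval=1000, hysteresis=2)
#   for step in range(num_steps):
#       loss = compute_loss()                 # user-defined forward pass
#       (loss * scaler.scale).backward()      # scale the loss before backward
#       overflow = grads_have_overflow()      # user-defined inf/nan gradient check
#       if not overflow:
#           unscale_and_step(optimizer, scaler.scale)   # user-defined optimizer step
#       scaler.update(overflow)               # grow or back off the loss scale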
| [
"torch.min",
"torch.max"
] | 1.8 | oikosohn/ColossalAI | fc5101f24c9a2ad8e7e16cb81e1ef7646a1061fd |
1.0 | #!/usr/bin/env python
# coding=utf-8
# Copyright 2022 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Training CLIP-like dual encoder models using text and vision encoders from the library.
The script can be used to train CLIP-like models for languages other than English by using
a text encoder pre-trained in the desired language. Currently this script supports the following vision
and text models:
Vision models: ViT (https://huggingface.co/models?filter=vit), CLIP (https://huggingface.co/models?filter=clip)
Text models: BERT, RoBERTa (https://huggingface.co/models?filter=fill-mask)
"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.io import ImageReadMode, read_image
from torchvision.transforms import CenterCrop, ConvertImageDtype, Normalize, Resize
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
AutoFeatureExtractor,
AutoModel,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.18.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/contrastive-image-text/requirements.txt")
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
feature_extractor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
cache_dir: Optional[str] = field(
default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
freeze_vision_model: bool = field(
default=False, metadata={"help": "Whether to freeze the vision model parameters or not."}
)
freeze_text_model: bool = field(
default=False, metadata={"help": "Whether to freeze the text model parameters or not."}
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
data_dir: Optional[str] = field(default=None, metadata={"help": "The data directory containing input files."})
image_column: Optional[str] = field(
default="image_path",
metadata={"help": "The name of the column in the datasets containing the full image file paths."},
)
caption_column: Optional[str] = field(
default="caption",
metadata={"help": "The name of the column in the datasets containing the image captions."},
)
train_file: Optional[str] = field(
default=None, metadata={"help": "The input training data file (a jsonlines file)."}
)
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file (a jsonlines file)."},
)
max_seq_length: Optional[int] = field(
default=128,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
def __post_init__(self):
if self.dataset_name is None and self.train_file is None and self.validation_file is None:
raise ValueError("Need either a dataset name or a training/validation file.")
else:
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension == "json", "`validation_file` should be a json file."
dataset_name_mapping = {
"image_caption_dataset.py": ("image_path", "caption"),
}
# We use torchvision for faster image pre-processing. The transforms are implemented as nn.Module,
# so we jit it to be faster.
class Transform(torch.nn.Module):
def __init__(self, image_size, mean, std):
super().__init__()
self.transforms = torch.nn.Sequential(
Resize([image_size], interpolation=InterpolationMode.BICUBIC),
CenterCrop(image_size),
ConvertImageDtype(torch.float),
Normalize(mean, std),
)
def forward(self, x: Image) -> torch.Tensor:
with torch.no_grad():
x = self.transforms(x)
return x
def collate_fn(examples):
pixel_values = torch.stack([example["pixel_values"] for example in examples])
input_ids = torch.tensor([example["input_ids"] for example in examples], dtype=torch.long)
attention_mask = torch.tensor([example["attention_mask"] for example in examples], dtype=torch.long)
return {
"pixel_values": pixel_values,
"input_ids": input_ids,
"attention_mask": attention_mask,
"return_loss": True,
}
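# The collated batch (illustrative shapes for a batch of B examples) therefore contains:
#   pixel_values:   float tensor of shape (B, 3, image_size, image_size)
#   input_ids:      long tensor of shape (B, max_seq_length)
#   attention_mask: long tensor of shape (B, max_seq_length)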
def main():
# 1. Parse input arguments
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# 2. Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# 3. Detecting last checkpoint and eventually continuing from it
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# 4. Load dataset
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files this script will use the first column for the full image path and the second column for the
# captions (unless you specify column names for this with the `image_column` and `caption_column` arguments).
#
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
dataset = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
cache_dir=model_args.cache_dir,
keep_in_memory=False,
data_dir=data_args.data_dir,
)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
extension = data_args.train_file.split(".")[-1]
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.validation_file.split(".")[-1]
if data_args.test_file is not None:
data_files["test"] = data_args.test_file
extension = data_args.test_file.split(".")[-1]
dataset = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# 5. Load pretrained model, tokenizer, and feature extractor
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(
model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
# Load feature_extractor, in this script we only use this to get the mean and std for normalization.
feature_extractor = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model = AutoModel.from_pretrained(
model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
config = model.config
def _freeze_params(module):
for param in module.parameters():
param.requires_grad = False
if model_args.freeze_vision_model:
_freeze_params(model.vision_model)
if model_args.freeze_text_model:
_freeze_params(model.text_model)
# set seed for torch dataloaders
set_seed(training_args.seed)
# Preprocessing the datasets.
# We need to tokenize inputs and targets.
if training_args.do_train:
column_names = dataset["train"].column_names
elif training_args.do_eval:
column_names = dataset["validation"].column_names
elif training_args.do_predict:
column_names = dataset["test"].column_names
else:
logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.")
return
# 6. Get the column names for input/target.
dataset_columns = dataset_name_mapping.get(data_args.dataset_name, None)
if data_args.image_column is None:
image_column = dataset_columns[0] if dataset_columns is not None else column_names[0]
else:
image_column = data_args.image_column
if image_column not in column_names:
raise ValueError(
f"--image_column' value '{data_args.image_column}' needs to be one of: {', '.join(column_names)}"
)
if data_args.caption_column is None:
caption_column = dataset_columns[1] if dataset_columns is not None else column_names[1]
else:
caption_column = data_args.caption_column
if caption_column not in column_names:
raise ValueError(
f"--caption_column' value '{data_args.caption_column}' needs to be one of: {', '.join(column_names)}"
)
# 7. Preprocessing the datasets.
# Initialize torchvision transforms and jit it for faster processing.
image_transformations = Transform(
config.vision_config.image_size, feature_extractor.image_mean, feature_extractor.image_std
)
image_transformations = torch.jit.script(image_transformations)
# Preprocessing the datasets.
# We need to tokenize input captions and transform the images.
def tokenize_captions(examples):
captions = [caption for caption in examples[caption_column]]
text_inputs = tokenizer(captions, max_length=data_args.max_seq_length, padding="max_length", truncation=True)
examples["input_ids"] = text_inputs.input_ids
examples["attention_mask"] = text_inputs.attention_mask
return examples
def transform_images(examples):
images = [read_image(image_file, mode=ImageReadMode.RGB) for image_file in examples[image_column]]
examples["pixel_values"] = [image_transformations(image) for image in images]
return examples
def filter_corrupt_images(examples):
"""remove problematic images"""
valid_images = []
for image_file in examples[image_column]:
try:
Image.open(image_file)
valid_images.append(True)
except Exception:
valid_images.append(False)
return valid_images
if training_args.do_train:
if "train" not in dataset:
raise ValueError("--do_train requires a train dataset")
train_dataset = dataset["train"]
if data_args.max_train_samples is not None:
max_train_samples = min(len(train_dataset), data_args.max_train_samples)
train_dataset = train_dataset.select(range(max_train_samples))
train_dataset = train_dataset.filter(
filter_corrupt_images, batched=True, num_proc=data_args.preprocessing_num_workers
)
train_dataset = train_dataset.map(
function=tokenize_captions,
batched=True,
remove_columns=[col for col in column_names if col != image_column],
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on train dataset",
)
# Transform images on the fly as doing it on the whole dataset takes too much time.
train_dataset.set_transform(transform_images)
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("--do_eval requires a train validation")
eval_dataset = dataset["validation"]
if data_args.max_eval_samples is not None:
max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
eval_dataset = eval_dataset.select(range(max_eval_samples))
eval_dataset = eval_dataset.filter(
filter_corrupt_images, batched=True, num_proc=data_args.preprocessing_num_workers
)
eval_dataset = eval_dataset.map(
function=tokenize_captions,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=[col for col in column_names if col != image_column],
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on validation dataset",
)
# Transform images on the fly as doing it on the whole dataset takes too much time.
eval_dataset.set_transform(transform_images)
if training_args.do_predict:
if "test" not in dataset:
raise ValueError("--do_predict requires a test dataset")
test_dataset = dataset["test"]
if data_args.max_eval_samples is not None:
max_eval_samples = min(len(test_dataset), data_args.max_eval_samples)
test_dataset = test_dataset.select(range(max_eval_samples))
test_dataset = test_dataset.filter(
filter_corrupt_images, batched=True, num_proc=data_args.preprocessing_num_workers
)
test_dataset = test_dataset.map(
function=tokenize_captions,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=[col for col in column_names if col != image_column],
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on test dataset",
)
# Transform images on the fly as doing it on the whole dataset takes too much time.
test_dataset.set_transform(transform_images)
# 8. Initialize our trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
data_collator=collate_fn,
)
# 9. Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics("train", train_result.metrics)
trainer.save_metrics("train", train_result.metrics)
trainer.save_state()
# 10. Evaluation
if training_args.do_eval:
metrics = trainer.evaluate()
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
# 11. Write Training Stats and push to hub.
kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "contrastive-image-text-modeling"}
if data_args.dataset_name is not None:
kwargs["dataset_tags"] = data_args.dataset_name
if data_args.dataset_config_name is not None:
kwargs["dataset_args"] = data_args.dataset_config_name
kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
else:
kwargs["dataset"] = data_args.dataset_name
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
| [
"torch.jit.script",
"torch.stack",
"torch.no_grad",
"torch.tensor"
] | 1.0 | Sophylax/transformers | 5d565dd8526ec794a701a80f216e03e506b1c607 |
1.6 | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import warnings
import torch
from botorch import fit_gpytorch_model
from botorch.exceptions.errors import UnsupportedError
from botorch.exceptions.warnings import OptimizationWarning
from botorch.models.gp_regression_mixed import MixedSingleTaskGP
from botorch.models.kernels.categorical import CategoricalKernel
from botorch.models.transforms import Normalize, Standardize
from botorch.posteriors import GPyTorchPosterior
from botorch.sampling import SobolQMCNormalSampler
from botorch.utils.containers import TrainingData
from botorch.utils.testing import _get_random_data, BotorchTestCase
from gpytorch.kernels.kernel import AdditiveKernel, ProductKernel
from gpytorch.kernels.matern_kernel import MaternKernel
from gpytorch.kernels.scale_kernel import ScaleKernel
from gpytorch.means import ConstantMean
from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood
from .test_gp_regression import _get_pvar_expected
class TestMixedSingleTaskGP(BotorchTestCase):
def test_gp(self):
d = 3
bounds = torch.tensor([[-1.0] * d, [1.0] * d])
for batch_shape, m, ncat, dtype in itertools.product(
(torch.Size(), torch.Size([2])),
(1, 2),
(0, 1, 3),
(torch.float, torch.double),
):
tkwargs = {"device": self.device, "dtype": dtype}
train_X, train_Y = _get_random_data(
batch_shape=batch_shape, m=m, d=d, **tkwargs
)
cat_dims = list(range(ncat))
# test unsupported options
with self.assertRaises(UnsupportedError):
MixedSingleTaskGP(
train_X,
train_Y,
cat_dims=cat_dims,
outcome_transform=Standardize(m=m, batch_shape=batch_shape),
)
with self.assertRaises(UnsupportedError):
MixedSingleTaskGP(
train_X,
train_Y,
cat_dims=cat_dims,
input_transform=Normalize(
d=d, bounds=bounds.to(**tkwargs), transform_on_train=True
),
)
if len(cat_dims) == 0:
with self.assertRaises(ValueError):
MixedSingleTaskGP(train_X, train_Y, cat_dims=cat_dims)
continue
model = MixedSingleTaskGP(train_X, train_Y, cat_dims=cat_dims)
self.assertEqual(model._ignore_X_dims_scaling_check, cat_dims)
mll = ExactMarginalLogLikelihood(model.likelihood, model).to(**tkwargs)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=OptimizationWarning)
fit_gpytorch_model(mll, options={"maxiter": 1}, max_retries=1)
# test init
self.assertIsInstance(model.mean_module, ConstantMean)
if ncat < 3:
self.assertIsInstance(model.covar_module, AdditiveKernel)
sum_kernel, prod_kernel = model.covar_module.kernels
self.assertIsInstance(sum_kernel, ScaleKernel)
self.assertIsInstance(sum_kernel.base_kernel, AdditiveKernel)
self.assertIsInstance(prod_kernel, ScaleKernel)
self.assertIsInstance(prod_kernel.base_kernel, ProductKernel)
sum_cont_kernel, sum_cat_kernel = sum_kernel.base_kernel.kernels
prod_cont_kernel, prod_cat_kernel = prod_kernel.base_kernel.kernels
self.assertIsInstance(sum_cont_kernel, MaternKernel)
self.assertIsInstance(sum_cat_kernel, ScaleKernel)
self.assertIsInstance(sum_cat_kernel.base_kernel, CategoricalKernel)
self.assertIsInstance(prod_cont_kernel, MaternKernel)
self.assertIsInstance(prod_cat_kernel, CategoricalKernel)
else:
self.assertIsInstance(model.covar_module, ScaleKernel)
self.assertIsInstance(model.covar_module.base_kernel, CategoricalKernel)
# test posterior
# test non batch evaluation
X = torch.rand(batch_shape + torch.Size([4, d]), **tkwargs)
expected_shape = batch_shape + torch.Size([4, m])
posterior = model.posterior(X)
self.assertIsInstance(posterior, GPyTorchPosterior)
self.assertEqual(posterior.mean.shape, expected_shape)
self.assertEqual(posterior.variance.shape, expected_shape)
# test adding observation noise
posterior_pred = model.posterior(X, observation_noise=True)
self.assertIsInstance(posterior_pred, GPyTorchPosterior)
self.assertEqual(posterior_pred.mean.shape, expected_shape)
self.assertEqual(posterior_pred.variance.shape, expected_shape)
pvar = posterior_pred.variance
pvar_exp = _get_pvar_expected(posterior, model, X, m)
self.assertTrue(torch.allclose(pvar, pvar_exp, rtol=1e-4, atol=1e-5))
# test batch evaluation
X = torch.rand(2, *batch_shape, 3, d, **tkwargs)
expected_shape = torch.Size([2]) + batch_shape + torch.Size([3, m])
posterior = model.posterior(X)
self.assertIsInstance(posterior, GPyTorchPosterior)
self.assertEqual(posterior.mean.shape, expected_shape)
# test adding observation noise in batch mode
posterior_pred = model.posterior(X, observation_noise=True)
self.assertIsInstance(posterior_pred, GPyTorchPosterior)
self.assertEqual(posterior_pred.mean.shape, expected_shape)
pvar = posterior_pred.variance
pvar_exp = _get_pvar_expected(posterior, model, X, m)
self.assertTrue(torch.allclose(pvar, pvar_exp, rtol=1e-4, atol=1e-5))
def test_condition_on_observations(self):
d = 3
for batch_shape, m, ncat, dtype in itertools.product(
(torch.Size(), torch.Size([2])),
(1, 2),
(1, 2),
(torch.float, torch.double),
):
tkwargs = {"device": self.device, "dtype": dtype}
train_X, train_Y = _get_random_data(
batch_shape=batch_shape, m=m, d=d, **tkwargs
)
cat_dims = list(range(ncat))
model = MixedSingleTaskGP(train_X, train_Y, cat_dims=cat_dims)
# evaluate model
model.posterior(torch.rand(torch.Size([4, d]), **tkwargs))
# test condition_on_observations
fant_shape = torch.Size([2])
# fantasize at different input points
X_fant, Y_fant = _get_random_data(
fant_shape + batch_shape, m=m, d=d, n=3, **tkwargs
)
cm = model.condition_on_observations(X_fant, Y_fant)
# fantasize at same input points (check proper broadcasting)
cm_same_inputs = model.condition_on_observations(
X_fant[0],
Y_fant,
)
test_Xs = [
# test broadcasting single input across fantasy and model batches
torch.rand(4, d, **tkwargs),
# separate input for each model batch and broadcast across
# fantasy batches
torch.rand(batch_shape + torch.Size([4, d]), **tkwargs),
# separate input for each model and fantasy batch
torch.rand(fant_shape + batch_shape + torch.Size([4, d]), **tkwargs),
]
for test_X in test_Xs:
posterior = cm.posterior(test_X)
self.assertEqual(
posterior.mean.shape, fant_shape + batch_shape + torch.Size([4, m])
)
posterior_same_inputs = cm_same_inputs.posterior(test_X)
self.assertEqual(
posterior_same_inputs.mean.shape,
fant_shape + batch_shape + torch.Size([4, m]),
)
# check that fantasies of batched model are correct
if len(batch_shape) > 0 and test_X.dim() == 2:
state_dict_non_batch = {
key: (val[0] if val.ndim > 1 else val)
for key, val in model.state_dict().items()
}
model_kwargs_non_batch = {
"train_X": train_X[0],
"train_Y": train_Y[0],
"cat_dims": cat_dims,
}
model_non_batch = type(model)(**model_kwargs_non_batch)
model_non_batch.load_state_dict(state_dict_non_batch)
model_non_batch.eval()
model_non_batch.likelihood.eval()
model_non_batch.posterior(torch.rand(torch.Size([4, d]), **tkwargs))
cm_non_batch = model_non_batch.condition_on_observations(
X_fant[0][0],
Y_fant[:, 0, :],
)
non_batch_posterior = cm_non_batch.posterior(test_X)
self.assertTrue(
torch.allclose(
posterior_same_inputs.mean[:, 0, ...],
non_batch_posterior.mean,
atol=1e-3,
)
)
self.assertTrue(
torch.allclose(
posterior_same_inputs.mvn.covariance_matrix[:, 0, :, :],
non_batch_posterior.mvn.covariance_matrix,
atol=1e-3,
)
)
def test_fantasize(self):
d = 3
for batch_shape, m, ncat, dtype in itertools.product(
(torch.Size(), torch.Size([2])),
(1, 2),
(1, 2),
(torch.float, torch.double),
):
tkwargs = {"device": self.device, "dtype": dtype}
train_X, train_Y = _get_random_data(
batch_shape=batch_shape, m=m, d=d, **tkwargs
)
cat_dims = list(range(ncat))
model = MixedSingleTaskGP(train_X, train_Y, cat_dims=cat_dims)
# fantasize
X_f = torch.rand(torch.Size(batch_shape + torch.Size([4, d])), **tkwargs)
sampler = SobolQMCNormalSampler(num_samples=3)
fm = model.fantasize(X=X_f, sampler=sampler)
self.assertIsInstance(fm, model.__class__)
fm = model.fantasize(X=X_f, sampler=sampler, observation_noise=False)
self.assertIsInstance(fm, model.__class__)
def test_subset_model(self):
d, m = 3, 2
for batch_shape, ncat, dtype in itertools.product(
(torch.Size(), torch.Size([2])),
(1, 2),
(torch.float, torch.double),
):
tkwargs = {"device": self.device, "dtype": dtype}
train_X, train_Y = _get_random_data(
batch_shape=batch_shape, m=m, d=d, **tkwargs
)
cat_dims = list(range(ncat))
model = MixedSingleTaskGP(train_X, train_Y, cat_dims=cat_dims)
with self.assertRaises(NotImplementedError):
model.subset_output([0])
# TODO: Support subsetting MixedSingleTaskGP models
# X = torch.rand(torch.Size(batch_shape + torch.Size([3, d])), **tkwargs)
# p = model.posterior(X)
# p_sub = subset_model.posterior(X)
# self.assertTrue(
# torch.allclose(p_sub.mean, p.mean[..., [0]], atol=1e-4, rtol=1e-4)
# )
# self.assertTrue(
# torch.allclose(
# p_sub.variance, p.variance[..., [0]], atol=1e-4, rtol=1e-4
# )
# )
def test_construct_inputs(self):
d, m = 3, 1
for batch_shape, ncat, dtype in itertools.product(
(torch.Size(), torch.Size([2])), (1, 2), (torch.float, torch.double)
):
tkwargs = {"device": self.device, "dtype": dtype}
train_X, train_Y = _get_random_data(
batch_shape=batch_shape, m=m, d=d, **tkwargs
)
cat_dims = list(range(ncat))
training_data = TrainingData.from_block_design(X=train_X, Y=train_Y)
kwarg_dict = MixedSingleTaskGP.construct_inputs(
training_data, categorical_features=cat_dims
)
self.assertTrue(torch.equal(kwarg_dict["train_X"], train_X))
self.assertTrue(torch.equal(kwarg_dict["train_Y"], train_Y))
self.assertEqual(kwarg_dict["cat_dims"], cat_dims)
self.assertIsNone(kwarg_dict["likelihood"])
| [
"torch.Size",
"torch.rand",
"torch.tensor",
"torch.allclose",
"torch.equal"
] | 1.6 | sgbaird/botorch | 3318c06333166328c95d24edd1055d4ef4bded70 |
0.4 | import torch
import torch.nn as nn
from pytorch_direct_warp.direct_proj import direct_projection
class DirectWarper(nn.Module):
def __init__(self, keep_index=False):
super(DirectWarper, self).__init__()
self.id_grid = None
self.keep_index = keep_index
def set_id_grid(self, depth):
b, h, w = depth.size()
i_range = torch.arange(0, h).view(1, h, 1).expand(1,h,w).type_as(depth) # [1, H, W]
j_range = torch.arange(0, w).view(1, 1, w).expand(1,h,w).type_as(depth) # [1, H, W]
ones = depth.new_ones(1,h,w)
self.id_grid = torch.stack((j_range, i_range, ones), dim=1) # [1, 3, H, W]
def forward(self, depth, img, pose_matrix, intrinsics, dilation=0):
b, h, w = depth.size()
        # id_grid has shape [1, 3, H, W]; rebuild it only if its spatial extent is too small
        if (self.id_grid is None) or \
           (self.id_grid.size(2) < h) or \
           (self.id_grid.size(3) < w):
            self.set_id_grid(depth)
rot_matrix = intrinsics @ pose_matrix[:,:,:3] @ intrinsics.inverse()
tr = intrinsics @ pose_matrix[:,:,-1:]
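        # warp in pixel space: with homogeneous pixel p = [u, v, 1]^T,
        # the projected point is p' ~ (K R K^-1)(depth * p) + K t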
point_cloud = (self.id_grid[:,:,:h,:w].expand(b,3,h,w)*depth.unsqueeze(1)).view(b, 3, -1)
point_sizes = point_cloud[:,-1:] * (2*dilation + 1)
transformed_points = rot_matrix @ point_cloud + tr
square_cloud = torch.cat([transformed_points, point_sizes], dim=1)
if img is not None:
colors = img.view(b, img.size(1), -1)
w_depth, w_colors, index = direct_projection(square_cloud, colors, h, w)
if self.keep_index:
self.index = index
return w_depth, w_colors
else:
w_depth, index = direct_projection(square_cloud, None, h, w)
if self.keep_index:
self.index = index
return w_depth
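
# Usage sketch (not part of the original repo): shapes below are assumptions read off the
# forward() signature above -- depth [B, H, W], img [B, C, H, W], pose_matrix [B, 3, 4],
# intrinsics [B, 3, 3].
#
#     warper = DirectWarper(keep_index=True)
#     depth = torch.ones(1, 128, 160)
#     img = torch.rand(1, 3, 128, 160)
#     pose = torch.eye(3, 4).unsqueeze(0)  # identity rotation, zero translation
#     K = torch.tensor([[100., 0., 80.], [0., 100., 64.], [0., 0., 1.]]).unsqueeze(0)
#     w_depth, w_img = warper(depth, img, pose, K, dilation=1)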
| [
"torch.cat",
"torch.stack",
"torch.arange"
] | 0.4.1 | ClementPinard/direct-warper | be46410202c8cd9efb982b5dc4c1eb954ab45b10 |
1.0 | # Copyright 2019 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Various convolutional networks.
"""
from typing import List, Optional, Tuple, Union, Dict, Any
import torch
from texar.torch.core.layers import get_pooling_layer_hparams
from texar.torch.hyperparams import HParams
from texar.torch.modules.networks.network_base import FeedForwardNetworkBase
from texar.torch.utils.shapes import mask_sequences
from texar.torch.utils.utils import uniquify_str
__all__ = [
"_to_list",
"Conv1DNetwork",
]
def _to_list(value: Union[Dict[str, Any], List, Tuple, int], name=None,
list_length=None):
r"""Converts `hparams` value into a list.
If :attr:`list_length` is given, then the canonicalized :attr:`value`
must be of length :attr:`list_length`.
"""
if not isinstance(value, (list, tuple)):
if list_length is not None:
value = [value] * list_length
else:
value = [value]
if list_length is not None and len(value) != list_length:
name = '' if name is None else name
raise ValueError("hparams '%s' must be a list of length %d"
% (name, list_length))
return value
class Conv1DNetwork(FeedForwardNetworkBase):
r"""Simple `Conv-1D` network which consists of a sequence of convolutional
layers followed with a sequence of dense layers.
Args:
in_channels (int): Number of channels in the input tensor.
in_features (int): Size of the feature dimension in the input tensor.
hparams (dict, optional): Hyperparameters. Missing
hyperparameter will be set to default values. See
:meth:`default_hparams` for the hyperparameter structure and
default values.
See :meth:`forward` for the inputs and outputs. If :attr:`"data_format"` is
set to ``"channels_first"`` (this is the default), inputs must be a tensor
of shape `[batch_size, channels, length]`. If :attr:`"data_format"` is set
to ``"channels_last"``, inputs must be a tensor of shape
`[batch_size, length, channels]`. For example, for sequence classification,
`length` corresponds to time steps, and `channels` corresponds to embedding
dim.
Example:
.. code-block:: python
nn = Conv1DNetwork(in_channels=20, in_features=256) # Use the default
inputs = torch.randn([64, 20, 256])
outputs = nn(inputs)
# outputs == Tensor of shape [64, 256], because the final dense layer
# has size 256.
.. document private functions
"""
def __init__(self, in_channels: int, in_features: Optional[int] = None,
hparams=None):
super().__init__(hparams=hparams)
if self.hparams.num_dense_layers > 0 and in_features is None:
raise ValueError("\"in_features\" cannot be None "
"if \"num_dense_layers\" > 0")
# construct only non-dense layers first
layer_hparams = self._build_non_dense_layer_hparams(
in_channels=in_channels)
self._build_layers(layers=None, layer_hparams=layer_hparams)
if self.hparams.num_dense_layers > 0:
if in_features is None:
raise ValueError("\"in_features\" cannot be None "
"if \"num_dense_layers\" > 0")
ones = torch.ones(1, in_channels, in_features)
input_size = self._infer_dense_layer_input_size(ones)
layer_hparams = self._build_dense_hparams(
in_features=input_size[1], layer_hparams=layer_hparams)
self._build_layers(layers=None, layer_hparams=layer_hparams)
@staticmethod
def default_hparams():
r"""Returns a dictionary of hyperparameters with default values.
.. code-block:: python
{
# (1) Conv layers
"num_conv_layers": 1,
"out_channels": 128,
"kernel_size": [3, 4, 5],
"conv_activation": "ReLU",
"conv_activation_kwargs": None,
"other_conv_kwargs": {},
"data_format": "channels_first",
# (2) Pooling layers
"pooling": "MaxPool1d",
"pool_size": None,
"pool_stride": 1,
"other_pool_kwargs": {},
# (3) Dense layers
"num_dense_layers": 1,
"out_features": 256,
"dense_activation": None,
"dense_activation_kwargs": None,
"final_dense_activation": None,
"final_dense_activation_kwargs": None,
"other_dense_kwargs": None,
# (4) Dropout
"dropout_conv": [1],
"dropout_dense": [],
"dropout_rate": 0.75,
# (5) Others
"name": "conv1d_network"
}
Here:
1. For **convolutional** layers:
`"num_conv_layers"`: int
Number of convolutional layers.
`"out_channels"`: int or list
The number of out_channels in the convolution, i.e., the
dimensionality of the output space.
- If ``"num_conv_layers"`` > 1 and ``"out_channels"`` is an int,
all convolution layers will have the same number of output
channels.
- If ``"num_conv_layers"`` > 1 and ``"out_channels"`` is a list,
the length must equal ``"num_conv_layers"``. The number of
output channels of each convolution layer will be the
corresponding element from this list.
`"kernel_size"`: int or list
Lengths of 1D convolution windows.
- If `"num_conv_layers"` = 1, this can also be a ``int`` list of
arbitrary length denoting differently sized convolution
windows. The number of output channels of each size is
specified by ``"out_channels"``.
For example, the default values will create 3 convolution
layers, each of which has kernel size of 3, 4, and 5,
respectively, and has output channel 128.
- If `"num_conv_layers"` > 1, this must be a list of length
``"num_conv_layers"``. Each element can be an ``int`` or a
``int`` list of arbitrary length denoting the kernel size of
each layer.
`"conv_activation"`: str or callable
Activation applied to the output of the convolutional
layers. Set to `None` to maintain a linear activation.
See :func:`~texar.torch.core.get_layer` for more details.
`"conv_activation_kwargs"`: dict, optional
Keyword arguments for the activation following the convolutional
layer. See :func:`~texar.torch.core.get_layer` for more details.
`"other_conv_kwargs"`: list or dict, optional
Other keyword arguments for :torch_nn:`Conv1d` constructor,
e.g., ``padding``.
- If a dict, the same dict is applied to all the convolution
layers.
- If a list, the length must equal ``"num_conv_layers"``. This
list can contain nested lists. If the convolution layer at
index i has multiple kernel sizes, then the corresponding
element of this list can also be a list of length equal to
``"kernel_size"`` at index i. If the element at index i is
instead a dict, then the same dict gets applied to all the
convolution layers at index i.
`"data_format"`: str, optional
Data format of the input tensor. Defaults to ``channels_first``
denoting the first dimension to be the channel dimension. Set it
to ``channels_last`` to treat last dimension as the channel
dimension. This argument can also be passed in ``forward``
function, in which case the value specified here will be
ignored.
2. For **pooling** layers:
`"pooling"`: str or class or instance
Pooling layer after each of the convolutional layer(s). Can be a
pooling layer class, its name or module path, or a class
instance.
`"pool_size"`: int or list, optional
Size of the pooling window. If an ``int``, all pooling layer
will have the same pool size. If a list, the list length must
equal ``"num_conv_layers"``. If `None` and the pooling type
is either :torch_docs:`MaxPool1d <nn.html#maxpool1d>` or
:torch_docs:`AvgPool1d <nn.html#avgpool1d>`, the pool size will
be set to input size. That is, the output of the pooling layer
is a single unit.
`"pool_stride"`: int or list, optional
Strides of the pooling operation. If an ``int``, all
layers will have the same stride. If a list, the list length
must equal ``"num_conv_layers"``.
`"other_pool_kwargs"`: list or dict, optional
Other keyword arguments for pooling layer class constructor.
- If a dict, the same dict is applied to all the pooling layers.
- If a list, the length must equal ``"num_conv_layers"``. The
pooling arguments for layer i will be the element at index i
from this list.
3. For **dense** layers (note that here dense layers always follow
convolutional and pooling layers):
`"num_dense_layers"`: int
Number of dense layers.
`"out_features"`: int or list
Dimension of features after the dense layers. If an
``int``, all dense layers will have the same feature dimension.
If a list of ``int``, the list length must equal
``"num_dense_layers"``.
`"dense_activation"`: str or callable
Activation function applied to the output of the dense
layers **except** the last dense layer output. Set to
`None` to maintain a linear activation.
`"dense_activation_kwargs"`: dict, optional
Keyword arguments for dense layer activation functions before
the last dense layer.
`"final_dense_activation"`: str or callable
Activation function applied to the output of the **last** dense
layer. Set to `None` to maintain a linear activation.
`"final_dense_activation_kwargs"`: dict, optional
Keyword arguments for the activation function of last
dense layer.
`"other_dense_kwargs"`: dict, optional
Other keyword arguments for dense layer class constructor.
4. For **dropouts**:
`"dropout_conv"`: int or list
The indices of convolutional layers (starting from 0) whose
**inputs** are applied with dropout.
The index = :attr:`num_conv_layers` means dropout applies to the
final convolutional layer output. For example,
.. code-block:: python
{
"num_conv_layers": 2,
"dropout_conv": [0, 2]
}
                will lead to a series of layers
`-dropout-conv0-conv1-dropout-`.
The dropout mode (training or not) is controlled
by :attr:`self.training`.
`"dropout_dense"`: int or list
Same as ``"dropout_conv"`` but applied to dense layers (index
starting from 0).
`"dropout_rate"`: float
The dropout rate, between 0 and 1. For example,
``"dropout_rate": 0.1`` would drop out 10% of elements.
5. Others:
`"name"`: str
Name of the network.
"""
return {
# (1) Conv layers
"num_conv_layers": 1,
"out_channels": 128,
"kernel_size": [3, 4, 5],
"conv_activation": "ReLU",
"conv_activation_kwargs": None,
"other_conv_kwargs": {},
"data_format": "channels_first",
# (2) Pooling layers
"pooling": "MaxPool1d",
"pool_size": None,
"pool_stride": 1,
"other_pool_kwargs": {},
# (3) Dense layers
"num_dense_layers": 1,
"out_features": 256,
"dense_activation": None,
"dense_activation_kwargs": None,
"final_dense_activation": None,
"final_dense_activation_kwargs": None,
"other_dense_kwargs": None,
# (4) Dropout
"dropout_conv": [1],
"dropout_dense": [],
"dropout_rate": 0.75,
# (5) Others
"name": "conv1d_network",
"@no_typecheck": ["out_channels", "kernel_size", "conv_activation",
"other_conv_kwargs", "pool_size", "pool_stride",
"other_pool_kwargs", "out_features",
"dense_activation", "dropout_conv",
"dropout_dense"]
}
def _build_pool_hparams(self):
pool_type = self._hparams.pooling
if pool_type == "MaxPool":
pool_type = "MaxPool1d"
elif pool_type == "AvgPool":
pool_type = "AvgPool1d"
npool = self._hparams.num_conv_layers
kernel_size = _to_list(self._hparams.pool_size, "pool_size", npool)
stride = _to_list(self._hparams.pool_stride, "pool_stride", npool)
other_kwargs = self._hparams.other_pool_kwargs
if isinstance(other_kwargs, HParams):
other_kwargs = other_kwargs.todict()
other_kwargs = _to_list(other_kwargs, "other_kwargs", npool)
elif isinstance(other_kwargs, (list, tuple)):
if len(other_kwargs) != npool:
raise ValueError("The length of hparams['other_pool_kwargs'] "
"must equal 'num_conv_layers'")
else:
raise ValueError("hparams['other_pool_kwargs'] must be either a "
"dict or list/tuple")
pool_hparams = []
for i in range(npool):
kwargs_i = {"kernel_size": kernel_size[i], "stride": stride[i]}
kwargs_i.update(other_kwargs[i])
pool_hparams_ = get_pooling_layer_hparams({"type": pool_type,
"kwargs": kwargs_i})
pool_hparams.append(pool_hparams_)
return pool_hparams
def _build_conv1d_hparams(self, in_channels, pool_hparams):
r"""Creates the hparams for each of the convolutional layers usable for
:func:`texar.torch.core.layers.get_layer`.
"""
nconv = self._hparams.num_conv_layers
if len(pool_hparams) != nconv:
raise ValueError("`pool_hparams` must be of length %d" % nconv)
in_channels = [in_channels]
out_channels = _to_list(self._hparams.out_channels, 'out_channels',
nconv)
# because in_channels(i) = out_channels(i-1)
in_channels.extend(out_channels[:-1])
if nconv == 1:
kernel_size = _to_list(self._hparams.kernel_size)
if not isinstance(kernel_size[0], (list, tuple)):
kernel_size = [kernel_size]
elif nconv > 1:
kernel_size = _to_list(self._hparams.kernel_size,
'kernel_size', nconv)
kernel_size = [_to_list(ks) for ks in kernel_size]
other_kwargs = self._hparams.other_conv_kwargs
if isinstance(other_kwargs, HParams):
other_kwargs = other_kwargs.todict()
other_kwargs = _to_list(other_kwargs, "other_conv_kwargs", nconv)
elif isinstance(other_kwargs, (list, tuple)):
if len(other_kwargs) != nconv:
raise ValueError("The length of hparams['other_conv_kwargs'] "
"must be equal to 'num_conv_layers'")
else:
raise ValueError("hparams['other_conv_kwargs'] must be a either "
"a dict or a list.")
def _activation_hparams(name, kwargs=None):
if kwargs is not None:
return {"type": name, "kwargs": kwargs}
else:
return {"type": name, "kwargs": {}}
conv_pool_hparams = []
for i in range(nconv):
hparams_i = []
names = []
if isinstance(other_kwargs[i], dict):
other_kwargs[i] = _to_list(other_kwargs[i], "other_kwargs[i]",
len(kernel_size[i]))
elif (isinstance(other_kwargs[i], (list, tuple))
and len(other_kwargs[i]) != len(kernel_size[i])):
raise ValueError("The length of hparams['other_conv_kwargs'][i]"
" must be equal to the length of "
"hparams['kernel_size'][i]")
for idx, ks_ij in enumerate(kernel_size[i]):
name = uniquify_str("conv_%d" % (i + 1), names)
names.append(name)
conv_kwargs_ij = {
"in_channels": in_channels[i],
"out_channels": out_channels[i],
"kernel_size": ks_ij
}
conv_kwargs_ij.update(other_kwargs[i][idx])
hparams_i.append(
{"type": "Conv1d", "kwargs": conv_kwargs_ij})
if len(hparams_i) == 1:
if self._hparams.conv_activation:
layers = {
"layers": [hparams_i[0],
_activation_hparams(
self._hparams.conv_activation,
self._hparams.conv_activation_kwargs)]}
sequential_layer = {"type": "Sequential", "kwargs": layers}
conv_pool_hparams.append([sequential_layer,
pool_hparams[i]])
else:
conv_pool_hparams.append([hparams_i[0], pool_hparams[i]])
else: # creates MergeLayer
mrg_kwargs_layers = []
for hparams_ij in hparams_i:
if self._hparams.conv_activation:
seq_kwargs_j = {
"layers": [
hparams_ij,
_activation_hparams(
self._hparams.conv_activation,
self._hparams.conv_activation_kwargs),
pool_hparams[i]
]
}
else:
seq_kwargs_j = {"layers": [hparams_ij, pool_hparams[i]]}
mrg_kwargs_layers.append(
{"type": "Sequential", "kwargs": seq_kwargs_j})
mrg_hparams = {"type": "MergeLayer",
"kwargs": {"layers": mrg_kwargs_layers}}
conv_pool_hparams.append(mrg_hparams)
return conv_pool_hparams
def _build_dense_hparams(self, in_features: int, layer_hparams):
ndense = self._hparams.num_dense_layers
in_features = [in_features]
out_features = _to_list(self._hparams.out_features, 'out_features',
ndense)
# because in_features(i) = out_features(i-1)
in_features.extend(out_features[:-1])
other_kwargs = self._hparams.other_dense_kwargs or {}
if isinstance(other_kwargs, HParams):
other_kwargs = other_kwargs.todict()
if not isinstance(other_kwargs, dict):
raise ValueError("hparams['other_dense_kwargs'] must be a dict.")
def _activation_hparams(name, kwargs=None):
if kwargs is not None:
return {"type": name, "kwargs": kwargs}
else:
return {"type": name, "kwargs": {}}
dense_hparams = []
for i in range(ndense):
kwargs_i = {"in_features": in_features[i],
"out_features": out_features[i]}
kwargs_i.update(other_kwargs)
dense_hparams_i = {"type": "Linear", "kwargs": kwargs_i}
if i < ndense - 1 and self._hparams.dense_activation is not None:
layers = {
"layers": [dense_hparams_i,
_activation_hparams(
self._hparams.dense_activation,
self._hparams.dense_activation_kwargs)
]}
sequential_layer = {"type": "Sequential", "kwargs": layers}
dense_hparams.append(sequential_layer)
elif (i == ndense - 1 and
self._hparams.final_dense_activation is not None):
layers = {
"layers": [dense_hparams_i,
_activation_hparams(
self._hparams.final_dense_activation,
self._hparams.final_dense_activation_kwargs)
]}
sequential_layer = {"type": "Sequential", "kwargs": layers}
dense_hparams.append(sequential_layer)
else:
dense_hparams.append(dense_hparams_i)
def _dropout_hparams():
return {"type": "Dropout",
"kwargs": {"p": self._hparams.dropout_rate}}
dropout_dense = _to_list(self._hparams.dropout_dense)
ndense = self._hparams.num_dense_layers
if ndense > 0: # Add flatten layers before dense layers
layer_hparams.append({"type": "Flatten"})
for dense_i in range(ndense):
if dense_i in dropout_dense:
layer_hparams.append(_dropout_hparams())
layer_hparams.append(dense_hparams[dense_i])
if ndense in dropout_dense:
layer_hparams.append(_dropout_hparams())
return layer_hparams
def _build_non_dense_layer_hparams(self, in_channels):
pool_hparams = self._build_pool_hparams()
conv_pool_hparams = self._build_conv1d_hparams(in_channels,
pool_hparams)
def _dropout_hparams():
return {"type": "Dropout",
"kwargs": {"p": self._hparams.dropout_rate}}
dropout_conv = _to_list(self._hparams.dropout_conv)
layers_hparams = []
nconv = self._hparams.num_conv_layers
for conv_i in range(nconv):
if conv_i in dropout_conv:
layers_hparams.append(_dropout_hparams())
if isinstance(conv_pool_hparams[conv_i], (list, tuple)):
layers_hparams += conv_pool_hparams[conv_i]
else:
layers_hparams.append(conv_pool_hparams[conv_i])
if nconv in dropout_conv:
layers_hparams.append(_dropout_hparams())
return layers_hparams
def forward(self, # type: ignore
input: torch.Tensor,
sequence_length: Union[torch.LongTensor, List[int]] = None,
dtype: Optional[torch.dtype] = None,
data_format: Optional[str] = None) -> torch.Tensor:
r"""Feeds forward inputs through the network layers and returns outputs.
Args:
input: The inputs to the network, which is a 3D tensor.
sequence_length (optional): An :tensor:`LongTensor` of shape
``[batch_size]`` or a python array containing the length of
each element in :attr:`inputs`. If given, time steps beyond
the length will first be masked out before feeding to the
layers.
dtype (optional): Type of the inputs. If not provided,
infers from inputs automatically.
data_format (optional): Data type of the input tensor. If
``channels_last``, the last dimension will be treated as channel
dimension so the size of the :attr:`input` should be
`[batch_size, X, channel]`. If ``channels_first``, first
dimension will be treated as channel dimension so the size
should be `[batch_size, channel, X]`. Defaults to None.
If None, the value will be picked from hyperparameters.
Returns:
The output of the final layer.
"""
if input.dim() != 3:
raise ValueError("'input' should be a 3D tensor.")
if data_format is None:
data_format = self.hparams["data_format"]
if data_format == "channels_first":
# masking requires channels in last dimension
input = input.permute(0, 2, 1)
if sequence_length is not None:
input = mask_sequences(input, sequence_length,
dtype=dtype, time_major=False)
# network is constructed for channel first tensors
input = input.permute(0, 2, 1)
output = super().forward(input)
elif data_format == "channels_last":
if sequence_length is not None:
input = mask_sequences(input, sequence_length,
dtype=dtype, time_major=False)
input = input.permute(0, 2, 1)
output = super().forward(input)
# transpose only when tensors are 3D
if output.dim() == 3:
output = output.permute(0, 2, 1)
else:
raise ValueError("Invalid 'data_format'")
return output
def _infer_dense_layer_input_size(self, input: torch.Tensor) -> torch.Size:
# feed forward the input on the conv part of the network to infer
# input shape for dense layers
with torch.no_grad():
output = super().forward(input)
return output.view(output.size()[0], -1).size()
@property
def output_size(self) -> int:
r"""The feature size of :meth:`forward` output.
"""
if self.hparams.num_dense_layers <= 0:
out_channels = self._hparams.out_channels
if not isinstance(out_channels, (list, tuple)):
out_channels = [out_channels]
nconv = self._hparams.num_conv_layers
if nconv == 1:
kernel_size = _to_list(self._hparams.kernel_size)
if not isinstance(kernel_size[0], (list, tuple)):
kernel_size = [kernel_size]
elif nconv > 1:
kernel_size = _to_list(self._hparams.kernel_size,
'kernel_size', nconv)
kernel_size = [_to_list(ks) for ks in kernel_size]
return out_channels[-1] * len(kernel_size[-1])
else:
out_features = self._hparams.out_features
if isinstance(out_features, (list, tuple)):
return out_features[-1]
else:
return out_features
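
# Usage sketch (illustrative only; not part of the Texar source): overriding a few of the
# defaults documented in `default_hparams` above.
#
#     hparams = {"num_conv_layers": 1, "out_channels": 64, "kernel_size": [3, 5],
#                "num_dense_layers": 2, "out_features": [128, 10], "dropout_rate": 0.5}
#     network = Conv1DNetwork(in_channels=20, in_features=256, hparams=hparams)
#     logits = network(torch.randn(32, 20, 256))  # expected shape: [32, 10]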
| [
"torch.no_grad",
"torch.ones"
] | 1.0.0 | Codle/texar-pytorch | d63556e7a8f48076c396467314a771d56552d595 |
1.4 | import logging
import math
import higher
import torch
from torch import nn, optim
import numpy as np
from torch.utils import data
from transformers import AdamW
import datasets
import models.utils
from models.base_models import ReplayMemory, TransformerClsModel, TransformerNeuromodulator
logging.basicConfig(level='INFO', format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger('ANML-Log')
class ANML:
def __init__(self, device, n_classes, **kwargs):
self.inner_lr = kwargs.get('inner_lr')
self.meta_lr = kwargs.get('meta_lr')
self.write_prob = kwargs.get('write_prob')
self.replay_rate = kwargs.get('replay_rate')
self.replay_every = kwargs.get('replay_every')
self.device = device
self.nm = TransformerNeuromodulator(n_classes, model_name=kwargs.get('model'),
device=device)
self.pn = TransformerClsModel(model_name=kwargs.get('model'),
n_classes=n_classes,
max_length=kwargs.get('max_length'),
device=device)
self.memory = ReplayMemory(write_prob=self.write_prob, tuple_size=2)
self.loss_fn = nn.CrossEntropyLoss()
logger.info('Loaded {} as NM'.format(self.nm.__class__.__name__))
logger.info('Loaded {} as PN'.format(self.pn.__class__.__name__))
meta_params = [p for p in self.nm.parameters() if p.requires_grad] + \
[p for p in self.pn.parameters() if p.requires_grad]
self.meta_optimizer = AdamW(meta_params, lr=self.meta_lr)
inner_params = [p for p in self.pn.parameters() if p.requires_grad]
self.inner_optimizer = optim.SGD(inner_params, lr=self.inner_lr)
def save_model(self, model_path):
checkpoint = {'nm': self.nm.state_dict(),
'pn': self.pn.state_dict()}
torch.save(checkpoint, model_path)
def load_model(self, model_path):
checkpoint = torch.load(model_path)
self.nm.load_state_dict(checkpoint['nm'])
self.pn.load_state_dict(checkpoint['pn'])
def evaluate(self, dataloader, updates, mini_batch_size):
support_set = []
for _ in range(updates):
text, labels = self.memory.read_batch(batch_size=mini_batch_size)
support_set.append((text, labels))
with higher.innerloop_ctx(self.pn, self.inner_optimizer,
copy_initial_weights=False,
track_higher_grads=False) as (fpn, diffopt):
# Inner loop
task_predictions, task_labels = [], []
support_loss = []
for text, labels in support_set:
labels = torch.tensor(labels).to(self.device)
input_dict = self.pn.encode_text(text)
modulation = self.nm(input_dict)
output = fpn(input_dict, modulation, out_from='full')
loss = self.loss_fn(output, labels)
diffopt.step(loss)
pred = models.utils.make_prediction(output.detach())
support_loss.append(loss.item())
task_predictions.extend(pred.tolist())
task_labels.extend(labels.tolist())
acc, prec, rec, f1 = models.utils.calculate_metrics(task_predictions, task_labels)
logger.info('Support set metrics: Loss = {:.4f}, accuracy = {:.4f}, precision = {:.4f}, '
'recall = {:.4f}, F1 score = {:.4f}'.format(np.mean(support_loss), acc, prec, rec, f1))
all_losses, all_predictions, all_labels = [], [], []
for text, labels in dataloader:
labels = torch.tensor(labels).to(self.device)
input_dict = self.pn.encode_text(text)
with torch.no_grad():
repr = fpn(input_dict, out_from='transformers')
modulation = self.nm(input_dict)
output = fpn(repr * modulation, out_from='linear')
loss = self.loss_fn(output, labels)
loss = loss.item()
pred = models.utils.make_prediction(output.detach())
all_losses.append(loss)
all_predictions.extend(pred.tolist())
all_labels.extend(labels.tolist())
acc, prec, rec, f1 = models.utils.calculate_metrics(all_predictions, all_labels)
logger.info('Test metrics: Loss = {:.4f}, accuracy = {:.4f}, precision = {:.4f}, recall = {:.4f}, '
'F1 score = {:.4f}'.format(np.mean(all_losses), acc, prec, rec, f1))
return acc, prec, rec, f1
def training(self, train_datasets, **kwargs):
updates = kwargs.get('updates')
mini_batch_size = kwargs.get('mini_batch_size')
if self.replay_rate != 0:
replay_batch_freq = self.replay_every // mini_batch_size
replay_freq = int(math.ceil((replay_batch_freq + 1) / (updates + 1)))
replay_steps = int(self.replay_every * self.replay_rate / mini_batch_size)
else:
replay_freq = 0
replay_steps = 0
logger.info('Replay frequency: {}'.format(replay_freq))
logger.info('Replay steps: {}'.format(replay_steps))
concat_dataset = data.ConcatDataset(train_datasets)
train_dataloader = iter(data.DataLoader(concat_dataset, batch_size=mini_batch_size, shuffle=False,
collate_fn=datasets.utils.batch_encode))
episode_id = 0
while True:
self.inner_optimizer.zero_grad()
support_loss, support_acc, support_prec, support_rec, support_f1 = [], [], [], [], []
with higher.innerloop_ctx(self.pn, self.inner_optimizer,
copy_initial_weights=False,
track_higher_grads=False) as (fpn, diffopt):
# Inner loop
support_set = []
task_predictions, task_labels = [], []
for _ in range(updates):
try:
text, labels = next(train_dataloader)
support_set.append((text, labels))
except StopIteration:
logger.info('Terminating training as all the data is seen')
return
for text, labels in support_set:
labels = torch.tensor(labels).to(self.device)
input_dict = self.pn.encode_text(text)
modulation = self.nm(input_dict)
output = fpn(input_dict, modulation, out_from='full')
loss = self.loss_fn(output, labels)
diffopt.step(loss)
pred = models.utils.make_prediction(output.detach())
support_loss.append(loss.item())
task_predictions.extend(pred.tolist())
task_labels.extend(labels.tolist())
self.memory.write_batch(text, labels)
acc, prec, rec, f1 = models.utils.calculate_metrics(task_predictions, task_labels)
logger.info('Episode {} support set: Loss = {:.4f}, accuracy = {:.4f}, precision = {:.4f}, '
'recall = {:.4f}, F1 score = {:.4f}'.format(episode_id + 1,
np.mean(support_loss), acc, prec, rec, f1))
# Outer loop
query_loss, query_acc, query_prec, query_rec, query_f1 = [], [], [], [], []
query_set = []
if self.replay_rate != 0 and (episode_id + 1) % replay_freq == 0:
for _ in range(replay_steps):
text, labels = self.memory.read_batch(batch_size=mini_batch_size)
query_set.append((text, labels))
else:
try:
text, labels = next(train_dataloader)
query_set.append((text, labels))
self.memory.write_batch(text, labels)
except StopIteration:
logger.info('Terminating training as all the data is seen')
return
for text, labels in query_set:
labels = torch.tensor(labels).to(self.device)
input_dict = self.pn.encode_text(text)
# repr = fpn(input_dict, out_from='transformers')
modulation = self.nm(input_dict)
output = fpn(input_dict, modulation, out_from='full')
# output = fpn(repr * modulation, out_from='linear')
loss = self.loss_fn(output, labels)
query_loss.append(loss.item())
pred = models.utils.make_prediction(output.detach())
acc, prec, rec, f1 = models.utils.calculate_metrics(pred.tolist(), labels.tolist())
query_acc.append(acc)
query_prec.append(prec)
query_rec.append(rec)
query_f1.append(f1)
# NM meta gradients
nm_params = [p for p in self.nm.parameters() if p.requires_grad]
meta_nm_grads = torch.autograd.grad(loss, nm_params, retain_graph=True)
for param, meta_grad in zip(nm_params, meta_nm_grads):
if param.grad is not None:
param.grad += meta_grad.detach()
else:
param.grad = meta_grad.detach()
# PN meta gradients
pn_params = [p for p in fpn.parameters() if p.requires_grad]
meta_pn_grads = torch.autograd.grad(loss, pn_params)
pn_params = [p for p in self.pn.parameters() if p.requires_grad]
for param, meta_grad in zip(pn_params, meta_pn_grads):
if param.grad is not None:
param.grad += meta_grad.detach()
else:
param.grad = meta_grad.detach()
# Meta optimizer step
self.meta_optimizer.step()
self.meta_optimizer.zero_grad()
logger.info('Episode {} query set: Loss = {:.4f}, accuracy = {:.4f}, precision = {:.4f}, '
'recall = {:.4f}, F1 score = {:.4f}'.format(episode_id + 1,
np.mean(query_loss), np.mean(query_acc),
np.mean(query_prec), np.mean(query_rec),
np.mean(query_f1)))
episode_id += 1
def testing(self, test_datasets, **kwargs):
updates = kwargs.get('updates')
mini_batch_size = kwargs.get('mini_batch_size')
accuracies, precisions, recalls, f1s = [], [], [], []
for test_dataset in test_datasets:
logger.info('Testing on {}'.format(test_dataset.__class__.__name__))
test_dataloader = data.DataLoader(test_dataset, batch_size=mini_batch_size, shuffle=False,
collate_fn=datasets.utils.batch_encode)
acc, prec, rec, f1 = self.evaluate(dataloader=test_dataloader, updates=updates, mini_batch_size=mini_batch_size)
accuracies.append(acc)
precisions.append(prec)
recalls.append(rec)
f1s.append(f1)
logger.info('Overall test metrics: Accuracy = {:.4f}, precision = {:.4f}, recall = {:.4f}, '
'F1 score = {:.4f}'.format(np.mean(accuracies), np.mean(precisions), np.mean(recalls),
np.mean(f1s)))
return accuracies
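
# Construction sketch (hypothetical values; the actual entry point lives elsewhere in the repo):
#
#     learner = ANML(device=torch.device('cpu'), n_classes=33,
#                    model='bert-base-uncased', max_length=448,
#                    inner_lr=1e-3, meta_lr=3e-5,
#                    write_prob=1.0, replay_rate=0.01, replay_every=9600)
#     learner.training(train_datasets, updates=5, mini_batch_size=16)
#     accuracies = learner.testing(test_datasets, updates=5, mini_batch_size=16)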
| [
"torch.utils.data.ConcatDataset",
"torch.save",
"torch.optim.SGD",
"torch.no_grad",
"torch.autograd.grad",
"torch.tensor",
"torch.utils.data.DataLoader",
"torch.load",
"torch.nn.CrossEntropyLoss"
] | 1.4.0 | mjhoshea/MetaLifelongLanguage | 22327dec0038a50276ba0994258f6f2fd46fbae7 |
1.5 | import os
import pickle
# import pickle5 as pickle
import random
import warnings
from distutils.util import strtobool
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
from environments.parallel_envs import make_vec_envs
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# def save_models(args, logger, policy, vae, envs, iter_idx):
# # TODO: save parameters, not entire model
#
# save_path = os.path.join(logger.full_output_folder, 'models')
# if not os.path.exists(save_path):
# os.mkdir(save_path)
# try:
# torch.save(policy.actor_critic, os.path.join(save_path, "policy{0}.pt".format(iter_idx)))
# except AttributeError:
# torch.save(policy.policy, os.path.join(save_path, "policy{0}.pt".format(iter_idx)))
# torch.save(vae.encoder, os.path.join(save_path, "encoder{0}.pt".format(iter_idx)))
# if vae.state_decoder is not None:
# torch.save(vae.state_decoder, os.path.join(save_path, "state_decoder{0}.pt".format(iter_idx)))
# if vae.reward_decoder is not None:
# torch.save(vae.reward_decoder,
# os.path.join(save_path, "reward_decoder{0}.pt".format(iter_idx)))
# if vae.task_decoder is not None:
# torch.save(vae.task_decoder, os.path.join(save_path, "task_decoder{0}.pt".format(iter_idx)))
#
# # save normalisation params of envs
# if args.norm_rew_for_policy:
# rew_rms = envs.venv.ret_rms
# save_obj(rew_rms, save_path, "env_rew_rms{0}.pkl".format(iter_idx))
# if args.norm_obs_for_policy:
# obs_rms = envs.venv.obs_rms
# save_obj(obs_rms, save_path, "env_obs_rms{0}.pkl".format(iter_idx))
def reset_env(env, args, indices=None, state=None):
""" env can be many environments or just one """
# reset all environments
if (indices is None) or (len(indices) == args.num_processes):
state = env.reset().float().to(device)
# reset only the ones given by indices
else:
assert state is not None
for i in indices:
state[i] = env.reset(index=i)
belief = torch.from_numpy(env.get_belief()).float().to(device) if args.pass_belief_to_policy else None
task = torch.from_numpy(env.get_task()).float().to(device) if args.pass_task_to_policy else None
return state, belief, task
def squash_action(action, args):
if args.norm_actions_post_sampling:
return torch.tanh(action)
else:
return action
def env_step(env, action, args):
act = squash_action(action.detach(), args)
next_obs, reward, done, infos = env.step(act)
if isinstance(next_obs, list):
next_obs = [o.to(device) for o in next_obs]
else:
next_obs = next_obs.to(device)
if isinstance(reward, list):
reward = [r.to(device) for r in reward]
else:
reward = reward.to(device)
belief = torch.from_numpy(env.get_belief()).float().to(device) if args.pass_belief_to_policy else None
task = torch.from_numpy(env.get_task()).float().to(device) if (args.pass_task_to_policy or args.decode_task) else None
return [next_obs, belief, task], reward, done, infos
def select_action(args,
policy,
deterministic,
state=None,
belief=None,
task=None,
latent_sample=None, latent_mean=None, latent_logvar=None):
""" Select action using the policy. """
latent = get_latent_for_policy(args=args, latent_sample=latent_sample, latent_mean=latent_mean,
latent_logvar=latent_logvar)
action = policy.act(state=state, latent=latent, belief=belief, task=task, deterministic=deterministic)
if isinstance(action, list) or isinstance(action, tuple):
value, action = action
else:
value = None
action = action.to(device)
return value, action
def get_latent_for_policy(args, latent_sample=None, latent_mean=None, latent_logvar=None):
if (latent_sample is None) and (latent_mean is None) and (latent_logvar is None):
return None
if args.add_nonlinearity_to_latent:
latent_sample = F.relu(latent_sample)
latent_mean = F.relu(latent_mean)
latent_logvar = F.relu(latent_logvar)
if args.sample_embeddings:
latent = latent_sample
else:
latent = torch.cat((latent_mean, latent_logvar), dim=-1)
if latent.shape[0] == 1:
latent = latent.squeeze(0)
return latent
def update_encoding(encoder, next_obs, action, reward, done, hidden_state):
# reset hidden state of the recurrent net when we reset the task
if done is not None:
hidden_state = encoder.reset_hidden(hidden_state, done)
with torch.no_grad():
latent_sample, latent_mean, latent_logvar, hidden_state = encoder(actions=action.float(),
states=next_obs,
rewards=reward,
hidden_state=hidden_state,
return_prior=False)
# TODO: move the sampling out of the encoder!
return latent_sample, latent_mean, latent_logvar, hidden_state
def seed(seed, deterministic_execution=False):
print('Seeding random, torch, numpy.')
random.seed(seed)
torch.manual_seed(seed)
torch.random.manual_seed(seed)
np.random.seed(seed)
if deterministic_execution:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
else:
print('Note that due to parallel processing results will be similar but not identical. '
'Use only one process and set --deterministic_execution to True if you want identical results '
'(only recommended for debugging).')
def update_linear_schedule(optimizer, epoch, total_num_epochs, initial_lr):
"""Decreases the learning rate linearly"""
lr = initial_lr - (initial_lr * (epoch / float(total_num_epochs)))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def recompute_embeddings(
policy_storage,
encoder,
sample,
update_idx,
detach_every
):
# get the prior
latent_sample = [policy_storage.latent_samples[0].detach().clone()]
latent_mean = [policy_storage.latent_mean[0].detach().clone()]
latent_logvar = [policy_storage.latent_logvar[0].detach().clone()]
latent_sample[0].requires_grad = True
latent_mean[0].requires_grad = True
latent_logvar[0].requires_grad = True
# loop through experience and update hidden state
# (we need to loop because we sometimes need to reset the hidden state)
h = policy_storage.hidden_states[0].detach()
for i in range(policy_storage.actions.shape[0]):
# reset hidden state of the GRU when we reset the task
h = encoder.reset_hidden(h, policy_storage.done[i + 1])
ts, tm, tl, h = encoder(policy_storage.actions.float()[i:i + 1],
policy_storage.next_state[i:i + 1],
policy_storage.rewards_raw[i:i + 1],
h,
sample=sample,
return_prior=False,
detach_every=detach_every
)
# print(i, reset_task.sum())
# print(i, (policy_storage.latent_mean[i + 1] - tm).sum())
# print(i, (policy_storage.latent_logvar[i + 1] - tl).sum())
# print(i, (policy_storage.hidden_states[i + 1] - h).sum())
latent_sample.append(ts)
latent_mean.append(tm)
latent_logvar.append(tl)
if update_idx == 0:
try:
assert (torch.cat(policy_storage.latent_mean) - torch.cat(latent_mean)).sum() == 0
assert (torch.cat(policy_storage.latent_logvar) - torch.cat(latent_logvar)).sum() == 0
except AssertionError:
warnings.warn('You are not recomputing the embeddings correctly!')
import pdb
pdb.set_trace()
policy_storage.latent_samples = latent_sample
policy_storage.latent_mean = latent_mean
policy_storage.latent_logvar = latent_logvar
class FeatureExtractor(nn.Module):
""" Used for extrating features for states/actions/rewards """
def __init__(self, input_size, output_size, activation_function):
super(FeatureExtractor, self).__init__()
self.output_size = output_size
self.activation_function = activation_function
if self.output_size != 0:
self.fc = nn.Linear(input_size, output_size)
else:
self.fc = None
def forward(self, inputs):
if self.output_size != 0:
return self.activation_function(self.fc(inputs))
else:
return torch.zeros(0, ).to(device)
def sample_gaussian(mu, logvar, num=None):
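    # reparameterisation trick: z = mu + sigma * eps with sigma = exp(0.5 * logvar), eps ~ N(0, I)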
std = torch.exp(0.5 * logvar)
if num is not None:
std = std.repeat(num, 1)
mu = mu.repeat(num, 1)
eps = torch.randn_like(std)
return mu + std * eps
def save_obj(obj, folder, name):
filename = os.path.join(folder, name + '.pkl')
with open(filename, 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(folder, name):
filename = os.path.join(folder, name + '.pkl')
with open(filename, 'rb') as f:
return pickle.load(f)
class RunningMeanStd(object):
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
# PyTorch version.
def __init__(self, epsilon=1e-4, shape=()):
self.mean = torch.zeros(shape).float().to(device)
self.var = torch.ones(shape).float().to(device)
self.count = epsilon
def update(self, x):
x = x.view((-1, x.shape[-1]))
batch_mean = x.mean(dim=0)
batch_var = x.var(dim=0)
batch_count = x.shape[0]
self.update_from_moments(batch_mean, batch_var, batch_count)
def update_from_moments(self, batch_mean, batch_var, batch_count):
self.mean, self.var, self.count = update_mean_var_count_from_moments(
self.mean, self.var, self.count, batch_mean, batch_var, batch_count)
def update_mean_var_count_from_moments(mean, var, count, batch_mean, batch_var, batch_count):
delta = batch_mean - mean
tot_count = count + batch_count
new_mean = mean + delta * batch_count / tot_count
m_a = var * count
m_b = batch_var * batch_count
M2 = m_a + m_b + torch.pow(delta, 2) * count * batch_count / tot_count
new_var = M2 / tot_count
new_count = tot_count
return new_mean, new_var, new_count
def boolean_argument(value):
"""Convert a string value to boolean."""
return bool(strtobool(value))
def get_task_dim(args):
env = make_vec_envs(env_name=args.env_name, seed=args.seed, num_processes=args.num_processes,
gamma=args.policy_gamma, device=device,
episodes_per_task=args.max_rollouts_per_task,
normalise_rew=args.norm_rew_for_policy, ret_rms=None,
tasks=None
)
return env.task_dim
def get_num_tasks(args):
env = make_vec_envs(env_name=args.env_name, seed=args.seed, num_processes=args.num_processes,
gamma=args.policy_gamma, device=device,
episodes_per_task=args.max_rollouts_per_task,
normalise_rew=args.norm_rew_for_policy, ret_rms=None,
tasks=None
)
try:
num_tasks = env.num_tasks
except AttributeError:
num_tasks = None
return num_tasks
def clip(value, low, high):
"""Imitates `{np,tf}.clip`.
`torch.clamp` doesn't support tensor valued low/high so this provides the
clip functionality.
TODO(hartikainen): The broadcasting hasn't been extensively tested yet,
but works for the regular cases where
`value.shape == low.shape == high.shape` or when `{low,high}.shape == ()`.
"""
low, high = torch.tensor(low), torch.tensor(high)
assert torch.all(low <= high), (low, high)
clipped_value = torch.max(torch.min(value, high), low)
return clipped_value
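
# Example sketch (illustrative only): the reparameterisation trick in `sample_gaussian`
# and tensor-valued clipping in `clip`.
#
#     mu, logvar = torch.zeros(5), torch.zeros(5)      # a standard normal
#     samples = sample_gaussian(mu, logvar, num=10)    # -> shape [10, 5]
#     clipped = clip(torch.tensor([-2.0, 0.5, 3.0]), low=-1.0, high=1.0)  # -> [-1.0, 0.5, 1.0]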
| [
"torch.nn.Linear",
"torch.zeros",
"torch.cat",
"torch.min",
"torch.no_grad",
"torch.nn.functional.relu",
"torch.manual_seed",
"torch.random.manual_seed",
"torch.randn_like",
"torch.all",
"torch.cuda.is_available",
"torch.tensor",
"torch.ones",
"torch.tanh",
"torch.exp",
"torch.pow"
] | 1.5.1 | MetaMind/varibad | 75e26430d83296c0ee3a7ac3ebb1506b7cf7d49e |
0.6 | import torch
import torch.nn as nn
import torch.nn.functional as F
from ..base import modules
class PSPBlock(nn.Module):
def __init__(self, in_channels, out_channels, pool_size, use_bathcnorm=True):
super().__init__()
if pool_size == 1:
use_bathcnorm = False # PyTorch does not support BatchNorm for 1x1 shape
self.pool = nn.Sequential(
nn.AdaptiveAvgPool2d(output_size=(pool_size, pool_size)),
modules.Conv2dReLU(in_channels, out_channels, (1, 1), use_batchnorm=use_bathcnorm)
)
def forward(self, x):
h, w = x.size(2), x.size(3)
x = self.pool(x)
x = F.interpolate(x, size=(h, w), mode='bilinear', align_corners=True)
return x
class PSPModule(nn.Module):
def __init__(self, in_channels, sizes=(1, 2, 3, 6), use_bathcnorm=True):
super().__init__()
self.blocks = nn.ModuleList([
PSPBlock(in_channels, in_channels // len(sizes), size, use_bathcnorm=use_bathcnorm) for size in sizes
])
def forward(self, x):
xs = [block(x) for block in self.blocks] + [x]
x = torch.cat(xs, dim=1)
return x
class PSPDecoder(nn.Module):
def __init__(
self,
encoder_channels,
use_batchnorm=True,
out_channels=512,
dropout=0.2,
):
super().__init__()
self.psp = PSPModule(
in_channels=encoder_channels[-1],
sizes=(1, 2, 3, 6),
use_bathcnorm=use_batchnorm,
)
self.conv = modules.Conv2dReLU(
in_channels=encoder_channels[-1] * 2,
out_channels=out_channels,
kernel_size=1,
use_batchnorm=use_batchnorm,
)
self.dropout = nn.Dropout2d(p=dropout)
def forward(self, *features):
x = features[-1]
x = self.psp(x)
x = self.conv(x)
x = self.dropout(x)
return x
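
# Shape sketch (assumed encoder channels, not from the library): the decoder only consumes the
# deepest feature map, pools it at scales (1, 2, 3, 6), concatenates and fuses the result.
#
#     decoder = PSPDecoder(encoder_channels=(3, 64, 256, 512, 1024, 2048), out_channels=512)
#     deepest = torch.rand(2, 2048, 16, 16)
#     out = decoder(None, None, None, None, None, deepest)  # -> [2, 512, 16, 16]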
| [
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.functional.interpolate",
"torch.cat",
"torch.nn.Dropout2d"
] | 0.6.3 | yida2311/segmentation_models.pytorch | e5c0b8b915a7d8c82222cc319030b241b12b0cf2 |
1.4 | # -*- coding: utf-8 -*-
'''
@Time : 2020/05/06 21:09
@Author : Tianxiaomo
@File : dataset.py
@Notice :
@Modification :
@Author :
@Time :
@Detail :
'''
import os
import random
import sys
import cv2
import numpy as np
import torch
from torch.utils.data.dataset import Dataset
def rand_uniform_strong(min, max):
if min > max:
swap = min
min = max
max = swap
return random.random() * (max - min) + min
def rand_scale(s):
scale = rand_uniform_strong(1, s)
if random.randint(0, 1) % 2:
return scale
return 1. / scale
def rand_precalc_random(min, max, random_part):
if max < min:
swap = min
min = max
max = swap
return (random_part * (max - min)) + min
def fill_truth_detection(bboxes, num_boxes, classes, flip, dx, dy, sx, sy, net_w, net_h):
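    # bboxes: [N, 5] array of [x1, y1, x2, y2, class_id] in original-image pixel coordinates.
    # Shift the boxes into the sampled crop (dx, dy), clip them to the crop size (sx, sy), drop
    # boxes that end up degenerate, rescale to the network input size (net_w, net_h) and mirror
    # them if `flip`. Returns at most `num_boxes` boxes and the smallest box width/height
    # before rescaling (10000 if no boxes survive).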
if bboxes.shape[0] == 0:
return bboxes, 10000
np.random.shuffle(bboxes)
bboxes[:, 0] -= dx
bboxes[:, 2] -= dx
bboxes[:, 1] -= dy
bboxes[:, 3] -= dy
bboxes[:, 0] = np.clip(bboxes[:, 0], 0, sx)
bboxes[:, 2] = np.clip(bboxes[:, 2], 0, sx)
bboxes[:, 1] = np.clip(bboxes[:, 1], 0, sy)
bboxes[:, 3] = np.clip(bboxes[:, 3], 0, sy)
out_box = list(np.where(((bboxes[:, 1] == sy) & (bboxes[:, 3] == sy)) |
((bboxes[:, 0] == sx) & (bboxes[:, 2] == sx)) |
((bboxes[:, 1] == 0) & (bboxes[:, 3] == 0)) |
((bboxes[:, 0] == 0) & (bboxes[:, 2] == 0)))[0])
list_box = list(range(bboxes.shape[0]))
for i in out_box:
list_box.remove(i)
bboxes = bboxes[list_box]
if bboxes.shape[0] == 0:
return bboxes, 10000
bboxes = bboxes[np.where((bboxes[:, 4] < classes) & (bboxes[:, 4] >= 0))[0]]
if bboxes.shape[0] > num_boxes:
bboxes = bboxes[:num_boxes]
min_w_h = np.array([bboxes[:, 2] - bboxes[:, 0], bboxes[:, 3] - bboxes[:, 1]]).min()
bboxes[:, 0] *= (net_w / sx)
bboxes[:, 2] *= (net_w / sx)
bboxes[:, 1] *= (net_h / sy)
bboxes[:, 3] *= (net_h / sy)
if flip:
temp = net_w - bboxes[:, 0]
bboxes[:, 0] = net_w - bboxes[:, 2]
bboxes[:, 2] = temp
return bboxes, min_w_h
def rect_intersection(a, b):
minx = max(a[0], b[0])
miny = max(a[1], b[1])
maxx = min(a[2], b[2])
maxy = min(a[3], b[3])
return [minx, miny, maxx, maxy]
def image_data_augmentation(mat, w, h, pleft, ptop, swidth, sheight, flip, dhue, dsat, dexp, gaussian_noise, blur,
truth):
try:
img = mat
oh, ow, _ = img.shape
pleft, ptop, swidth, sheight = int(pleft), int(ptop), int(swidth), int(sheight)
# crop
src_rect = [pleft, ptop, swidth + pleft, sheight + ptop] # x1,y1,x2,y2
img_rect = [0, 0, ow, oh]
        new_src_rect = rect_intersection(src_rect, img_rect)  # intersection of the crop rect and the image rect
dst_rect = [max(0, -pleft), max(0, -ptop), max(0, -pleft) + new_src_rect[2] - new_src_rect[0],
max(0, -ptop) + new_src_rect[3] - new_src_rect[1]]
# cv2.Mat sized
        if (src_rect[0] == 0 and src_rect[1] == 0 and src_rect[2] == ow and src_rect[3] == oh):
sized = cv2.resize(img, (w, h), cv2.INTER_LINEAR)
else:
cropped = np.zeros([sheight, swidth, 3])
cropped[:, :, ] = np.mean(img, axis=(0, 1))
cropped[dst_rect[1]:dst_rect[3], dst_rect[0]:dst_rect[2]] = \
img[new_src_rect[1]:new_src_rect[3], new_src_rect[0]:new_src_rect[2]]
# resize
sized = cv2.resize(cropped, (w, h), cv2.INTER_LINEAR)
# flip
if flip:
# cv2.Mat cropped
sized = cv2.flip(sized, 1) # 0 - x-axis, 1 - y-axis, -1 - both axes (x & y)
# HSV augmentation
# cv2.COLOR_BGR2HSV, cv2.COLOR_RGB2HSV, cv2.COLOR_HSV2BGR, cv2.COLOR_HSV2RGB
if dsat != 1 or dexp != 1 or dhue != 0:
if img.shape[2] >= 3:
hsv_src = cv2.cvtColor(sized.astype(np.float32), cv2.COLOR_RGB2HSV) # RGB to HSV
hsv = cv2.split(hsv_src)
hsv[1] *= dsat
hsv[2] *= dexp
hsv[0] += 179 * dhue
hsv_src = cv2.merge(hsv)
sized = np.clip(cv2.cvtColor(hsv_src, cv2.COLOR_HSV2RGB), 0, 255) # HSV to RGB (the same as previous)
else:
sized *= dexp
if blur:
if blur == 1:
dst = cv2.GaussianBlur(sized, (17, 17), 0)
# cv2.bilateralFilter(sized, dst, 17, 75, 75)
                else:
                    ksize = int(blur / 2) * 2 + 1  # GaussianBlur needs an odd integer kernel size
                    dst = cv2.GaussianBlur(sized, (ksize, ksize), 0)
                if blur == 1:
                    # blur only the background: paste the un-blurred object regions back from `sized`
                    # (truth boxes are [x1, y1, x2, y2, cls] in network-input pixel coordinates)
                    img_h, img_w = sized.shape[:2]
                    for b in truth:
                        left, top = int(np.clip(b[0], 0, img_w)), int(np.clip(b[1], 0, img_h))
                        right, bottom = int(np.clip(b[2], 0, img_w)), int(np.clip(b[3], 0, img_h))
                        dst[top:bottom, left:right] = sized[top:bottom, left:right]
                sized = dst
if gaussian_noise:
noise = np.array(sized.shape)
gaussian_noise = min(gaussian_noise, 127)
gaussian_noise = max(gaussian_noise, 0)
cv2.randn(noise, 0, gaussian_noise) # mean and variance
sized = sized + noise
except:
print("OpenCV can't augment image: " + str(w) + " x " + str(h))
sized = mat
return sized
def filter_truth(bboxes, dx, dy, sx, sy, xd, yd):
bboxes[:, 0] -= dx
bboxes[:, 2] -= dx
bboxes[:, 1] -= dy
bboxes[:, 3] -= dy
bboxes[:, 0] = np.clip(bboxes[:, 0], 0, sx)
bboxes[:, 2] = np.clip(bboxes[:, 2], 0, sx)
bboxes[:, 1] = np.clip(bboxes[:, 1], 0, sy)
bboxes[:, 3] = np.clip(bboxes[:, 3], 0, sy)
out_box = list(np.where(((bboxes[:, 1] == sy) & (bboxes[:, 3] == sy)) |
((bboxes[:, 0] == sx) & (bboxes[:, 2] == sx)) |
((bboxes[:, 1] == 0) & (bboxes[:, 3] == 0)) |
((bboxes[:, 0] == 0) & (bboxes[:, 2] == 0)))[0])
list_box = list(range(bboxes.shape[0]))
for i in out_box:
list_box.remove(i)
bboxes = bboxes[list_box]
bboxes[:, 0] += xd
bboxes[:, 2] += xd
bboxes[:, 1] += yd
bboxes[:, 3] += yd
return bboxes
def blend_truth_mosaic(out_img, img, bboxes, w, h, cut_x, cut_y, i_mixup,
left_shift, right_shift, top_shift, bot_shift):
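    # Paste crop number `i_mixup` of a 4-image mosaic into `out_img`
    # (0: top-left, 1: top-right, 2: bottom-left, 3: bottom-right), split at (cut_x, cut_y),
    # and shift/filter that image's boxes into the pasted region.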
left_shift = min(left_shift, w - cut_x)
top_shift = min(top_shift, h - cut_y)
right_shift = min(right_shift, cut_x)
bot_shift = min(bot_shift, cut_y)
if i_mixup == 0:
bboxes = filter_truth(bboxes, left_shift, top_shift, cut_x, cut_y, 0, 0)
out_img[:cut_y, :cut_x] = img[top_shift:top_shift + cut_y, left_shift:left_shift + cut_x]
if i_mixup == 1:
bboxes = filter_truth(bboxes, cut_x - right_shift, top_shift, w - cut_x, cut_y, cut_x, 0)
out_img[:cut_y, cut_x:] = img[top_shift:top_shift + cut_y, cut_x - right_shift:w - right_shift]
if i_mixup == 2:
bboxes = filter_truth(bboxes, left_shift, cut_y - bot_shift, cut_x, h - cut_y, 0, cut_y)
out_img[cut_y:, :cut_x] = img[cut_y - bot_shift:h - bot_shift, left_shift:left_shift + cut_x]
if i_mixup == 3:
bboxes = filter_truth(bboxes, cut_x - right_shift, cut_y - bot_shift, w - cut_x, h - cut_y, cut_x, cut_y)
out_img[cut_y:, cut_x:] = img[cut_y - bot_shift:h - bot_shift, cut_x - right_shift:w - right_shift]
return out_img, bboxes
def draw_box(img, bboxes):
for b in bboxes:
img = cv2.rectangle(img, (b[0], b[1]), (b[2], b[3]), (0, 255, 0), 2)
return img
class Yolo_dataset(Dataset):
def __init__(self, label_path, cfg, train=True):
super(Yolo_dataset, self).__init__()
        if cfg.mixup == 2:
            raise ValueError("cutmix (mixup=2) isn't supported for the detector")
        elif cfg.mixup == 3 and cfg.letter_box:
            raise ValueError("Combination letter_box=1 & mosaic=1 isn't supported, use only one of these parameters")
self.cfg = cfg
self.train = train
truth = {}
f = open(label_path, 'r', encoding='utf-8')
for line in f.readlines():
data = line.split(" ")
truth[data[0]] = []
for i in data[1:]:
truth[data[0]].append([int(float(j)) for j in i.split(',')])
self.truth = truth
self.imgs = list(self.truth.keys())
def __len__(self):
return len(self.truth.keys())
def __getitem__(self, index):
if not self.train:
return self._get_val_item(index)
img_path = self.imgs[index]
bboxes = np.array(self.truth.get(img_path), dtype=np.float)
img_path = os.path.join(self.cfg.dataset_dir, img_path)
use_mixup = self.cfg.mixup
if random.randint(0, 1):
use_mixup = 0
if use_mixup == 3:
min_offset = 0.2
cut_x = random.randint(int(self.cfg.w * min_offset), int(self.cfg.w * (1 - min_offset)))
cut_y = random.randint(int(self.cfg.h * min_offset), int(self.cfg.h * (1 - min_offset)))
r1, r2, r3, r4, r_scale = 0, 0, 0, 0, 0
dhue, dsat, dexp, flip, blur = 0, 0, 0, 0, 0
gaussian_noise = 0
out_img = np.zeros([self.cfg.h, self.cfg.w, 3])
out_bboxes = []
for i in range(use_mixup + 1):
if i != 0:
img_path = random.choice(list(self.truth.keys()))
bboxes = np.array(self.truth.get(img_path), dtype=np.float)
img_path = os.path.join(self.cfg.dataset_dir, img_path)
img = cv2.imread(img_path)
if img is None:
continue
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
oh, ow, oc = img.shape
dh, dw, dc = np.array(np.array([oh, ow, oc]) * self.cfg.jitter, dtype=np.int)
dhue = rand_uniform_strong(-self.cfg.hue, self.cfg.hue)
dsat = rand_scale(self.cfg.saturation)
dexp = rand_scale(self.cfg.exposure)
pleft = random.randint(-dw, dw)
pright = random.randint(-dw, dw)
ptop = random.randint(-dh, dh)
pbot = random.randint(-dh, dh)
flip = random.randint(0, 1) if self.cfg.flip else 0
if (self.cfg.blur):
tmp_blur = random.randint(0, 2) # 0 - disable, 1 - blur background, 2 - blur the whole image
if tmp_blur == 0:
blur = 0
elif tmp_blur == 1:
blur = 1
else:
blur = self.cfg.blur
if self.cfg.gaussian and random.randint(0, 1):
gaussian_noise = self.cfg.gaussian
else:
gaussian_noise = 0
if self.cfg.letter_box:
img_ar = ow / oh
net_ar = self.cfg.w / self.cfg.h
result_ar = img_ar / net_ar
# print(" ow = %d, oh = %d, w = %d, h = %d, img_ar = %f, net_ar = %f, result_ar = %f \n", ow, oh, w, h, img_ar, net_ar, result_ar);
if result_ar > 1: # sheight - should be increased
oh_tmp = ow / net_ar
delta_h = (oh_tmp - oh) / 2
ptop = ptop - delta_h
pbot = pbot - delta_h
# print(" result_ar = %f, oh_tmp = %f, delta_h = %d, ptop = %f, pbot = %f \n", result_ar, oh_tmp, delta_h, ptop, pbot);
else: # swidth - should be increased
ow_tmp = oh * net_ar
delta_w = (ow_tmp - ow) / 2
pleft = pleft - delta_w
pright = pright - delta_w
# printf(" result_ar = %f, ow_tmp = %f, delta_w = %d, pleft = %f, pright = %f \n", result_ar, ow_tmp, delta_w, pleft, pright);
swidth = ow - pleft - pright
sheight = oh - ptop - pbot
truth, min_w_h = fill_truth_detection(bboxes, self.cfg.boxes, self.cfg.classes, flip, pleft, ptop, swidth,
sheight, self.cfg.w, self.cfg.h)
if (min_w_h / 8) < blur and blur > 1: # disable blur if one of the objects is too small
blur = min_w_h / 8
ai = image_data_augmentation(img, self.cfg.w, self.cfg.h, pleft, ptop, swidth, sheight, flip,
dhue, dsat, dexp, gaussian_noise, blur, truth)
if use_mixup == 0:
out_img = ai
out_bboxes = truth
if use_mixup == 1:
if i == 0:
old_img = ai.copy()
old_truth = truth.copy()
elif i == 1:
                    out_img = cv2.addWeighted(ai, 0.5, old_img, 0.5, 0.0)
out_bboxes = np.concatenate([old_truth, truth], axis=0)
elif use_mixup == 3:
                if flip:
                    pleft, pright = pright, pleft
left_shift = int(min(cut_x, max(0, (-int(pleft) * self.cfg.w / swidth))))
top_shift = int(min(cut_y, max(0, (-int(ptop) * self.cfg.h / sheight))))
right_shift = int(min((self.cfg.w - cut_x), max(0, (-int(pright) * self.cfg.w / swidth))))
bot_shift = int(min(self.cfg.h - cut_y, max(0, (-int(pbot) * self.cfg.h / sheight))))
out_img, out_bbox = blend_truth_mosaic(out_img, ai, truth.copy(), self.cfg.w, self.cfg.h, cut_x,
cut_y, i, left_shift, right_shift, top_shift, bot_shift)
out_bboxes.append(out_bbox)
# print(img_path)
if use_mixup == 3:
out_bboxes = np.concatenate(out_bboxes, axis=0)
out_bboxes1 = np.zeros([self.cfg.boxes, 5])
out_bboxes1[:min(out_bboxes.shape[0], self.cfg.boxes)] = out_bboxes[:min(out_bboxes.shape[0], self.cfg.boxes)]
return out_img, out_bboxes1
def _get_val_item(self, index):
"""
"""
img_path = self.imgs[index]
        bboxes_with_cls_id = np.array(self.truth.get(img_path), dtype=np.float64)
img = cv2.imread(os.path.join(self.cfg.dataset_dir, img_path))
# img_height, img_width = img.shape[:2]
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# img = cv2.resize(img, (self.cfg.w, self.cfg.h))
# img = torch.from_numpy(img.transpose(2, 0, 1)).float().div(255.0).unsqueeze(0)
num_objs = len(bboxes_with_cls_id)
target = {}
# boxes to coco format
boxes = bboxes_with_cls_id[...,:4]
boxes[..., 2:] = boxes[..., 2:] - boxes[..., :2] # box width, box height
target['boxes'] = torch.as_tensor(boxes, dtype=torch.float32)
target['labels'] = torch.as_tensor(bboxes_with_cls_id[...,-1].flatten(), dtype=torch.int64)
target['image_id'] = torch.tensor([get_image_id(img_path)])
target['area'] = (target['boxes'][:,3])*(target['boxes'][:,2])
target['iscrowd'] = torch.zeros((num_objs,), dtype=torch.int64)
return img, target
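    # Note: because _get_val_item returns (image, dict) pairs, a validation
    # DataLoader needs a custom collate_fn (e.g. lambda batch: tuple(zip(*batch)))
    # instead of the default collation; this is the usual detection pattern,
    # not something enforced by this class.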
def get_image_id(filename:str) -> int:
"""
Convert a string to a integer.
Make sure that the images and the `image_id`s are in one-one correspondence.
There are already `image_id`s in annotations of the COCO dataset,
in which case this function is unnecessary.
For creating one's own `get_image_id` function, one can refer to
https://github.com/google/automl/blob/master/efficientdet/dataset/create_pascal_tfrecord.py#L86
or refer to the following code (where the filenames are like 'level1_123.jpg')
>>> lv, no = os.path.splitext(os.path.basename(filename))[0].split("_")
>>> lv = lv.replace("level", "")
>>> no = f"{int(no):04d}"
>>> return int(lv+no)
"""
# raise NotImplementedError("Create your own 'get_image_id' function")
# lv, no = os.path.splitext(os.path.basename(filename))[0].split("_")
# lv = lv.replace("level", "")
# no = f"{int(no):04d}"
# return int(lv+no)
# print("You could also create your own 'get_image_id' function.")
# print(filename)
parts = filename.split("/")[-1].split(".")[0].split('_')
    image_id = int(parts[-1])
    # print(image_id)
    return image_id
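# Example (hypothetical filename): with the default implementation above,
# 'train_000123.jpg' maps to image_id 123.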
if __name__ == "__main__":
from cfg import Cfg
import matplotlib.pyplot as plt
random.seed(2020)
np.random.seed(2020)
Cfg.dataset_dir = '/mnt/e/Dataset'
dataset = Yolo_dataset(Cfg.train_label, Cfg)
for i in range(100):
        out_img, out_bboxes = dataset[i]
a = draw_box(out_img.copy(), out_bboxes.astype(np.int32))
plt.imshow(a.astype(np.int32))
plt.show()
| [
"torch.zeros",
"torch.as_tensor"
] | 1.4.0 | LipatJob/pytorch-YOLOv4 | ba91f3c7a3e54a557e045d4276ac08d0a85afbee |
1.3 | import numpy as np
import unittest
import torch
import os
import heat as ht
if os.environ.get("DEVICE") == "gpu" and torch.cuda.is_available():
ht.use_device("gpu")
torch.cuda.set_device(torch.device(ht.get_device().torch_device))
else:
ht.use_device("cpu")
device = ht.get_device().torch_device
ht_device = None
if os.environ.get("DEVICE") == "lgpu" and torch.cuda.is_available():
device = ht.gpu.torch_device
ht_device = ht.gpu
torch.cuda.set_device(device)
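# DEVICE=gpu switches heat's default device to the GPU, while DEVICE=lgpu
# additionally passes an explicit ht.gpu handle (ht_device) to every allocation
# in the tests below; in all other cases everything stays on the CPU.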
class TestCommunication(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.data = torch.tensor([[3, 2, 1], [4, 5, 6]], dtype=torch.float32, device=device)
cls.sorted3Dtensor = ht.float32(
[
[[0, 1, 2, 3, 4], [10, 11, 12, 13, 14], [20, 21, 22, 23, 24]],
[[100, 101, 102, 103, 104], [110, 111, 112, 113, 114], [120, 121, 122, 123, 124]],
],
device=ht_device,
)
def test_self_communicator(self):
comm = ht.core.communication.MPI_SELF
with self.assertRaises(ValueError):
comm.chunk(self.data.shape, split=2)
with self.assertRaises(ValueError):
comm.chunk(self.data.shape, split=-3)
with self.assertRaises(TypeError):
comm.chunk(self.data.shape, split=0, rank="dicndjh")
offset, lshape, chunks = comm.chunk(self.data.shape, split=0)
self.assertIsInstance(offset, int)
self.assertEqual(offset, 0)
self.assertIsInstance(lshape, tuple)
self.assertEqual(len(lshape), len(self.data.shape))
self.assertEqual(lshape, self.data.shape)
self.assertIsInstance(chunks, tuple)
self.assertEqual(len(chunks), len(self.data.shape))
self.assertEqual(1, (self.data == self.data[chunks]).all().item())
def test_mpi_communicator(self):
comm = ht.core.communication.MPI_WORLD
self.assertLess(comm.rank, comm.size)
with self.assertRaises(ValueError):
comm.chunk(self.data.shape, split=2)
with self.assertRaises(ValueError):
comm.chunk(self.data.shape, split=-3)
offset, lshape, chunks = comm.chunk(self.data.shape, split=0)
self.assertIsInstance(offset, int)
self.assertGreaterEqual(offset, 0)
self.assertLessEqual(offset, self.data.shape[0])
self.assertIsInstance(lshape, tuple)
self.assertEqual(len(lshape), len(self.data.shape))
self.assertGreaterEqual(lshape[0], 0)
self.assertLessEqual(lshape[0], self.data.shape[0])
self.assertIsInstance(chunks, tuple)
self.assertEqual(len(chunks), len(self.data.shape))
def test_cuda_aware_mpi(self):
self.assertTrue(hasattr(ht.communication, "CUDA_AWARE_MPI"))
self.assertIsInstance(ht.communication.CUDA_AWARE_MPI, bool)
def test_contiguous_memory_buffer(self):
# vector heat tensor
vector_data = ht.arange(1, 10, device=ht_device)
vector_out = ht.zeros_like(vector_data, device=ht_device)
# test that target and destination are not equal
self.assertTrue((vector_data._DNDarray__array != vector_out._DNDarray__array).all())
self.assertTrue(vector_data._DNDarray__array.is_contiguous())
self.assertTrue(vector_out._DNDarray__array.is_contiguous())
# send message to self that is received into a separate buffer afterwards
req = vector_data.comm.Isend(vector_data, dest=vector_data.comm.rank)
vector_out.comm.Recv(vector_out, source=vector_out.comm.rank)
req.Wait()
# check that after sending the data everything is equal
self.assertTrue((vector_data._DNDarray__array == vector_out._DNDarray__array).all())
self.assertTrue(vector_out._DNDarray__array.is_contiguous())
# multi-dimensional torch tensor
tensor_data = torch.arange(3 * 4 * 5 * 6, device=device).reshape(3, 4, 5, 6) + 1
tensor_out = torch.zeros_like(tensor_data, device=device)
# test that target and destination are not equal
self.assertTrue((tensor_data != tensor_out).all())
self.assertTrue(tensor_data.is_contiguous())
self.assertTrue(tensor_out.is_contiguous())
# send message to self that is received into a separate buffer afterwards
comm = ht.core.communication.MPI_WORLD
req = comm.Isend(tensor_data, dest=comm.rank)
comm.Recv(tensor_out, source=comm.rank)
req.Wait()
# check that after sending the data everything is equal
self.assertTrue((tensor_data == tensor_out).all())
self.assertTrue(tensor_out.is_contiguous())
def test_non_contiguous_memory_buffer(self):
# non-contiguous source
non_contiguous_data = ht.ones((3, 2), device=ht_device).T
contiguous_out = ht.zeros_like(non_contiguous_data, device=ht_device)
# test that target and destination are not equal
self.assertTrue(
(non_contiguous_data._DNDarray__array != contiguous_out._DNDarray__array).all()
)
self.assertFalse(non_contiguous_data._DNDarray__array.is_contiguous())
self.assertTrue(contiguous_out._DNDarray__array.is_contiguous())
# send message to self that is received into a separate buffer afterwards
req = non_contiguous_data.comm.Isend(
non_contiguous_data, dest=non_contiguous_data.comm.rank
)
contiguous_out.comm.Recv(contiguous_out, source=contiguous_out.comm.rank)
req.Wait()
# check that after sending the data everything is equal
self.assertTrue(
(non_contiguous_data._DNDarray__array == contiguous_out._DNDarray__array).all()
)
if ht.get_device().device_type == "cpu" or ht.communication.CUDA_AWARE_MPI:
self.assertTrue(contiguous_out._DNDarray__array.is_contiguous())
# non-contiguous destination
contiguous_data = ht.ones((3, 2), device=ht_device)
non_contiguous_out = ht.zeros((2, 3), device=ht_device).T
# test that target and destination are not equal
self.assertTrue(
(contiguous_data._DNDarray__array != non_contiguous_out._DNDarray__array).all()
)
self.assertTrue(contiguous_data._DNDarray__array.is_contiguous())
self.assertFalse(non_contiguous_out._DNDarray__array.is_contiguous())
# send message to self that is received into a separate buffer afterwards
req = contiguous_data.comm.Isend(contiguous_data, dest=contiguous_data.comm.rank)
non_contiguous_out.comm.Recv(non_contiguous_out, source=non_contiguous_out.comm.rank)
req.Wait()
# check that after sending the data everything is equal
self.assertTrue(
(contiguous_data._DNDarray__array == non_contiguous_out._DNDarray__array).all()
)
if ht.get_device().device_type == "cpu" or ht.communication.CUDA_AWARE_MPI:
self.assertFalse(non_contiguous_out._DNDarray__array.is_contiguous())
        # non-contiguous source and destination
both_non_contiguous_data = ht.ones((3, 2), device=ht_device).T
both_non_contiguous_out = ht.zeros((3, 2), device=ht_device).T
# test that target and destination are not equal
self.assertTrue(
(
both_non_contiguous_data._DNDarray__array
!= both_non_contiguous_out._DNDarray__array
).all()
)
self.assertFalse(both_non_contiguous_data._DNDarray__array.is_contiguous())
self.assertFalse(both_non_contiguous_out._DNDarray__array.is_contiguous())
# send message to self that is received into a separate buffer afterwards
req = both_non_contiguous_data.comm.Isend(
both_non_contiguous_data, dest=both_non_contiguous_data.comm.rank
)
both_non_contiguous_out.comm.Recv(
both_non_contiguous_out, source=both_non_contiguous_out.comm.rank
)
req.Wait()
# check that after sending the data everything is equal
self.assertTrue(
(
both_non_contiguous_data._DNDarray__array
== both_non_contiguous_out._DNDarray__array
).all()
)
if ht.get_device().device_type == "cpu" or ht.communication.CUDA_AWARE_MPI:
self.assertFalse(both_non_contiguous_out._DNDarray__array.is_contiguous())
def test_default_comm(self):
# default comm is world
a = ht.zeros((4, 5), device=ht_device)
self.assertIs(ht.get_comm(), ht.MPI_WORLD)
self.assertIs(a.comm, ht.MPI_WORLD)
# we can set a new comm that is being used for new allocation, old are not affected
ht.use_comm(ht.MPI_SELF)
b = ht.zeros((4, 5), device=ht_device)
self.assertIs(ht.get_comm(), ht.MPI_SELF)
self.assertIs(b.comm, ht.MPI_SELF)
self.assertIsNot(a.comm, ht.MPI_SELF)
# reset the comm
ht.use_comm(ht.MPI_WORLD)
# test for proper sanitation
with self.assertRaises(TypeError):
ht.use_comm("1")
def test_allgather(self):
# contiguous data
data = ht.ones((1, 7), device=ht_device)
output = ht.zeros((ht.MPI_WORLD.size, 7), device=ht_device)
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
data.comm.Allgather(data, output)
# check result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
self.assertTrue(
(output._DNDarray__array == torch.ones(ht.MPI_WORLD.size, 7, device=device)).all()
)
# contiguous data, different gather axis
data = ht.ones((7, 2), dtype=ht.float64, device=ht_device)
output = ht.random.randn(7, 2 * ht.MPI_WORLD.size, device=ht_device)
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
data.comm.Allgather(data, output, recv_axis=1)
# check result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
self.assertTrue(
(output._DNDarray__array == torch.ones(7, 2 * ht.MPI_WORLD.size, device=device)).all()
)
# non-contiguous data
data = ht.ones((4, 5), device=ht_device).T
output = ht.zeros((5, 4 * ht.MPI_WORLD.size), device=ht_device)
# ensure prior invariants
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
data.comm.Allgather(data, output)
# check result
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
self.assertTrue(
(output._DNDarray__array == torch.ones(5, 4 * ht.MPI_WORLD.size, device=device)).all()
)
# non-contiguous output, different gather axis
data = ht.ones((5, 7), device=ht_device)
output = ht.zeros((7 * ht.MPI_WORLD.size, 5), device=ht_device).T
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
data.comm.Allgather(data, output, recv_axis=1)
# check result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
self.assertTrue(
(output._DNDarray__array == torch.ones(5, 7 * ht.MPI_WORLD.size, device=device)).all()
)
# contiguous data
data = ht.array([[ht.MPI_WORLD.rank] * 10], device=ht_device)
output = ht.array([[0] * 10] * ht.MPI_WORLD.size, device=ht_device)
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
# perform the allgather operation
data.comm.Allgather(data, output, recv_axis=0)
# check result
result = ht.array([np.arange(0, ht.MPI_WORLD.size)] * 10, device=ht_device).T
self.assertTrue(ht.equal(output, result))
# contiguous data
data = ht.array([[ht.MPI_WORLD.rank]] * 10, device=ht_device)
output = ht.array([[0] * ht.MPI_WORLD.size] * 10, device=ht_device)
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
# perform the allgather operation
data.comm.Allgather(data, output, recv_axis=1)
# check result
result = ht.array([np.arange(0, ht.MPI_WORLD.size)] * 10, device=ht_device)
self.assertTrue(ht.equal(output, result))
# other datatypes (send numpy array)
data = np.array([ht.MPI_WORLD.rank] * 3)
output = ht.array([[0] * 3] * ht.MPI_WORLD.size, device=ht_device)
# perform the allgather operation
ht.MPI_WORLD.Allgatherv(data, output)
# check result
result = ht.array([np.arange(0, ht.MPI_WORLD.size)] * 3, device=ht_device).T
self.assertTrue(ht.equal(output, result))
data = ht.array([ht.MPI_WORLD.rank] * 3, device=ht_device)
output = np.array([[0] * 3] * ht.MPI_WORLD.size)
# perform the allgather operation
ht.MPI_WORLD.Allgatherv(data, output)
# check result
result = np.array([np.arange(0, ht.MPI_WORLD.size)] * 3).T
self.assertTrue((output == result).all())
with self.assertRaises(TypeError):
            data = np.array([ht.MPI_WORLD.rank] * 3)
output = ht.array([[0] * 3 * ht.MPI_WORLD.size], device=ht_device)
ht.MPI_WORLD.Allgatherv(data, output, recv_axis=1)
with self.assertRaises(TypeError):
data = ht.array([ht.MPI_WORLD.rank] * 3, device=ht_device)
            output = np.array([[0] * 3 * ht.MPI_WORLD.size])
ht.MPI_WORLD.Allgatherv(data, output, recv_axis=1)
def test_allgatherv(self):
# contiguous data buffer, contiguous output buffer
data = ht.ones((ht.MPI_WORLD.rank + 1, 10), device=ht_device)
output_count = ht.MPI_WORLD.size * (ht.MPI_WORLD.size + 1) // 2
output = ht.zeros((output_count, 10), device=ht_device)
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
# perform the allgather operation
counts = tuple(range(1, ht.MPI_WORLD.size + 1))
displs = tuple(np.cumsum(range(ht.MPI_WORLD.size)))
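        # Rank r contributes r + 1 rows, so the receive counts are (1, 2, ..., size)
        # and the displacements are their exclusive prefix sums.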
data.comm.Allgatherv(data, (output, counts, displs))
# check result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
self.assertTrue(
(output._DNDarray__array == torch.ones(output_count, 10, device=device)).all()
)
# non-contiguous data buffer, contiguous output buffer
data = ht.ones((10, 2 * (ht.MPI_WORLD.rank + 1)), device=ht_device).T
output_count = ht.MPI_WORLD.size * (ht.MPI_WORLD.size + 1)
output = ht.zeros((output_count, 10), device=ht_device)
# ensure prior invariants
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
# perform the allgather operation
counts = tuple(range(2, 2 * (ht.MPI_WORLD.size + 1), 2))
displs = tuple(np.cumsum(range(0, 2 * ht.MPI_WORLD.size, 2)))
data.comm.Allgatherv(data, (output, counts, displs))
# check result
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
self.assertTrue(
(output._DNDarray__array == torch.ones(output_count, 10, device=device)).all()
)
# contiguous data buffer, non-contiguous output buffer
data = ht.ones((2 * (ht.MPI_WORLD.rank + 1), 10), device=ht_device)
output_count = ht.MPI_WORLD.size * (ht.MPI_WORLD.size + 1)
output = ht.zeros((10, output_count), device=ht_device).T
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
# perform the allgather operation
counts = tuple(range(2, 2 * (ht.MPI_WORLD.size + 1), 2))
displs = tuple(np.cumsum(range(0, 2 * ht.MPI_WORLD.size, 2)))
data.comm.Allgatherv(data, (output, counts, displs))
# check result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
self.assertTrue(
(output._DNDarray__array == torch.ones(output_count, 10, device=device)).all()
)
# non-contiguous data buffer, non-contiguous output buffer
data = ht.ones((10, 2 * (ht.MPI_WORLD.rank + 1)), device=ht_device).T
output_count = ht.MPI_WORLD.size * (ht.MPI_WORLD.size + 1)
output = ht.zeros((10, output_count), device=ht_device).T
# ensure prior invariants
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
# perform the allgather operation
counts = tuple(range(2, 2 * (ht.MPI_WORLD.size + 1), 2))
displs = tuple(np.cumsum(range(0, 2 * ht.MPI_WORLD.size, 2)))
data.comm.Allgatherv(data, (output, counts, displs))
# check result
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
self.assertTrue(
(output._DNDarray__array == torch.ones(output_count, 10, device=device)).all()
)
# contiguous data buffer
data = ht.array([[ht.MPI_WORLD.rank] * 10] * (ht.MPI_WORLD.size + 1), device=ht_device)
# contiguous output buffer
output_shape = data.lshape
output = ht.zeros(output_shape, dtype=ht.int64, device=ht_device)
# Results for comparison
first_line = ht.array([[0] * 10], device=ht_device)
last_line = ht.array([[ht.MPI_WORLD.size - 1] * 10], device=ht_device)
# perform allgather operation
send_counts, send_displs, _ = data.comm.counts_displs_shape(data.lshape, 0)
recv_counts, recv_displs, _ = data.comm.counts_displs_shape(output.lshape, 0)
data.comm.Allgatherv((data, send_counts, send_displs), (output, recv_counts, recv_displs))
# check result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
self.assertTrue((output[0] == first_line).all())
self.assertTrue((output[output.lshape[0] - 1] == last_line).all())
def test_allreduce(self):
# contiguous data
data = ht.ones((10, 2), dtype=ht.int8, device=ht_device)
out = ht.zeros_like(data, device=ht_device)
# reduce across all nodes
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
data.comm.Allreduce(data, out, op=ht.MPI.SUM)
# check the reduction result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
self.assertTrue((out._DNDarray__array == data.comm.size).all())
# non-contiguous data
data = ht.ones((10, 2), dtype=ht.int8, device=ht_device).T
out = ht.zeros_like(data, device=ht_device)
# reduce across all nodes
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
data.comm.Allreduce(data, out, op=ht.MPI.SUM)
# check the reduction result
# the data tensor will be contiguous after the reduction
# MPI enforces the same data type for send and receive buffer
# the reduction implementation takes care of making the internal Torch storage
# consistent
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
self.assertTrue((out._DNDarray__array == data.comm.size).all())
# non-contiguous output
data = ht.ones((10, 2), dtype=ht.int8, device=ht_device)
out = ht.zeros((2, 10), dtype=ht.int8, device=ht_device).T
# reduce across all nodes
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(out._DNDarray__array.is_contiguous())
data.comm.Allreduce(data, out, op=ht.MPI.SUM)
# check the reduction result
# the data tensor will be contiguous after the reduction
# MPI enforces the same data type for send and receive buffer
# the reduction implementation takes care of making the internal Torch storage
# consistent
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
self.assertTrue((out._DNDarray__array == data.comm.size).all())
def test_alltoall(self):
# contiguous data
data = ht.array([[ht.MPI_WORLD.rank] * 10] * ht.MPI_WORLD.size, device=ht_device)
output = ht.zeros((ht.MPI_WORLD.size, 10), dtype=ht.int64, device=ht_device)
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
data.comm.Alltoall(data, output)
# check scatter result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
comparison = (
torch.arange(ht.MPI_WORLD.size, device=device)
.reshape(-1, 1)
.expand(ht.MPI_WORLD.size, 10)
)
self.assertTrue((output._DNDarray__array == comparison).all())
# contiguous data, different gather axis
        data = ht.array([[ht.MPI_WORLD.rank] * ht.MPI_WORLD.size] * 10, device=ht_device)
output = ht.zeros((10, ht.MPI_WORLD.size), dtype=ht.int64, device=ht_device)
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
data.comm.Alltoall(data, output, send_axis=1)
# check scatter result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
comparison = (
torch.arange(ht.MPI_WORLD.size, device=device).repeat(10).reshape(10, ht.MPI_WORLD.size)
)
self.assertTrue((output._DNDarray__array == comparison).all())
# non-contiguous data
data = ht.ones((10, 2 * ht.MPI_WORLD.size), dtype=ht.int64, device=ht_device).T
output = ht.zeros((2 * ht.MPI_WORLD.size, 10), dtype=ht.int64, device=ht_device)
# ensure prior invariants
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
data.comm.Alltoall(data, output)
# check scatter result
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
comparison = torch.ones((2 * ht.MPI_WORLD.size, 10), dtype=torch.int64, device=device)
self.assertTrue((output._DNDarray__array == comparison).all())
# non-contiguous output, different gather axis
data = ht.ones((10, 2 * ht.MPI_WORLD.size), dtype=ht.int64, device=ht_device)
output = ht.zeros((2 * ht.MPI_WORLD.size, 10), dtype=ht.int64, device=ht_device).T
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
data.comm.Alltoall(data, output, send_axis=1)
# check scatter result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
comparison = torch.ones((10, 2 * ht.MPI_WORLD.size), dtype=torch.int64, device=device)
self.assertTrue((output._DNDarray__array == comparison).all())
with self.assertRaises(TypeError):
            data = np.array([ht.MPI_WORLD.rank] * 3)
output = ht.array([[0] * 3 * ht.MPI_WORLD.size], device=ht_device)
ht.MPI_WORLD.Alltoall(data, output, send_axis=1)
with self.assertRaises(TypeError):
data = ht.array([ht.MPI_WORLD.rank] * 3, device=ht_device)
output = np.array([[0] * 3 * ht.MPI_WORLD.size])
ht.MPI_WORLD.Alltoall(data, output, send_axis=1)
def test_alltoallv(self):
# contiguous data buffer
data = ht.array([[ht.MPI_WORLD.rank] * 10] * (ht.MPI_WORLD.size + 1), device=ht_device)
send_counts, send_displs, output_shape = data.comm.counts_displs_shape(data.lshape, 0)
# contiguous output buffer
output = ht.zeros(output_shape, dtype=ht.int64, device=ht_device)
recv_counts, recv_displs, _ = data.comm.counts_displs_shape(output.lshape, 0)
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
if ht.MPI_WORLD.size != 1:
self.assertNotEqual(data.shape[0] % ht.MPI_WORLD.size, 0)
else:
self.assertEqual(data.shape[0] % ht.MPI_WORLD.size, 0)
data.comm.Alltoallv((data, send_counts, send_displs), (output, recv_counts, recv_displs))
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
stack_count = output_shape[0] // ht.MPI_WORLD.size * 10
comparison = (
torch.arange(ht.MPI_WORLD.size, device=device)
.reshape(-1, 1)
.expand(-1, stack_count)
.reshape(-1, 10)
)
self.assertTrue((output._DNDarray__array == comparison).all())
# non-contiguous data buffer
data = ht.array([[ht.MPI_WORLD.rank] * (ht.MPI_WORLD.size + 1)] * 10, device=ht_device).T
send_counts, send_displs, output_shape = data.comm.counts_displs_shape(data.lshape, 0)
# contiguous output buffer
output = ht.zeros(output_shape, dtype=ht.int64, device=ht_device)
recv_counts, recv_displs, _ = data.comm.counts_displs_shape(output.lshape, 0)
# ensure prior invariants
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
if ht.MPI_WORLD.size != 1:
self.assertNotEqual(data.shape[0] % ht.MPI_WORLD.size, 0)
else:
self.assertEqual(data.shape[0] % ht.MPI_WORLD.size, 0)
data.comm.Alltoallv((data, send_counts, send_displs), (output, recv_counts, recv_displs))
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
stack_count = output_shape[0] // ht.MPI_WORLD.size * 10
comparison = (
torch.arange(ht.MPI_WORLD.size, device=device)
.reshape(-1, 1)
.expand(-1, stack_count)
.reshape(-1, 10)
)
self.assertTrue((output._DNDarray__array == comparison).all())
# contiguous data buffer
data = ht.array([[ht.MPI_WORLD.rank] * 10] * (ht.MPI_WORLD.size + 1), device=ht_device)
send_counts, send_displs, output_shape = data.comm.counts_displs_shape(data.lshape, 0)
# non-contiguous output buffer
output_shape = tuple(reversed(output_shape))
output = ht.zeros(output_shape, dtype=ht.int64, device=ht_device).T
recv_counts, recv_displs, _ = data.comm.counts_displs_shape(output.lshape, 0)
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
if ht.MPI_WORLD.size != 1:
self.assertNotEqual(data.shape[0] % ht.MPI_WORLD.size, 0)
else:
self.assertEqual(data.shape[0] % ht.MPI_WORLD.size, 0)
data.comm.Alltoallv((data, send_counts, send_displs), (output, recv_counts, recv_displs))
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
stack_count = output_shape[1] // ht.MPI_WORLD.size * 10
comparison = (
torch.arange(ht.MPI_WORLD.size, device=device)
.reshape(-1, 1)
.expand(-1, stack_count)
.reshape(-1, 10)
)
self.assertTrue((output._DNDarray__array == comparison).all())
# non-contiguous data buffer
data = ht.array([[ht.MPI_WORLD.rank] * (ht.MPI_WORLD.size + 1)] * 10, device=ht_device).T
send_counts, send_displs, output_shape = data.comm.counts_displs_shape(data.lshape, 0)
# non-contiguous output buffer
output_shape = tuple(reversed(output_shape))
output = ht.zeros(output_shape, dtype=ht.int64, device=ht_device).T
recv_counts, recv_displs, _ = data.comm.counts_displs_shape(output.lshape, 0)
# ensure prior invariants
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
if ht.MPI_WORLD.size != 1:
self.assertNotEqual(data.shape[0] % ht.MPI_WORLD.size, 0)
else:
self.assertEqual(data.shape[0] % ht.MPI_WORLD.size, 0)
data.comm.Alltoallv((data, send_counts, send_displs), (output, recv_counts, recv_displs))
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
stack_count = output_shape[1] // ht.MPI_WORLD.size * 10
comparison = (
torch.arange(ht.MPI_WORLD.size, device=device)
.reshape(-1, 1)
.expand(-1, stack_count)
.reshape(-1, 10)
)
self.assertTrue((output._DNDarray__array == comparison).all())
def test_bcast(self):
# contiguous data
data = ht.arange(10, dtype=ht.int64, device=ht_device)
if ht.MPI_WORLD.rank != 0:
data = ht.zeros_like(data, dtype=ht.int64, device=ht_device)
# broadcast data to all nodes
self.assertTrue(data._DNDarray__array.is_contiguous())
data.comm.Bcast(data, root=0)
# assert output is equal
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue((data._DNDarray__array == torch.arange(10, device=device)).all())
# non-contiguous data
data = ht.ones((2, 5), dtype=ht.float32, device=ht_device).T
if ht.MPI_WORLD.rank != 0:
data = ht.zeros((2, 5), dtype=ht.float32, device=ht_device).T
# broadcast data to all nodes
self.assertFalse(data._DNDarray__array.is_contiguous())
data.comm.Bcast(data, root=0)
# assert output is equal
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertTrue(
(data._DNDarray__array == torch.ones((5, 2), dtype=torch.float32, device=device)).all()
)
def test_exscan(self):
# contiguous data
data = ht.ones((5, 3), dtype=ht.int64, device=ht_device)
out = ht.zeros_like(data, device=ht_device)
# reduce across all nodes
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
data.comm.Exscan(data, out)
# check the reduction result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
self.assertTrue((out._DNDarray__array == data.comm.rank).all())
# non-contiguous data
data = ht.ones((5, 3), dtype=ht.int64, device=ht_device).T
out = ht.zeros_like(data, device=ht_device)
# reduce across all nodes
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
data.comm.Exscan(data, out)
# check the reduction result
# the data tensor will be contiguous after the reduction
# MPI enforces the same data type for send and receive buffer
# the reduction implementation takes care of making the internal Torch
# storage consistent
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
self.assertTrue((out._DNDarray__array == data.comm.rank).all())
# non-contiguous output
data = ht.ones((5, 3), dtype=ht.int64, device=ht_device)
out = ht.zeros((3, 5), dtype=ht.int64, device=ht_device).T
# reduce across all nodes
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(out._DNDarray__array.is_contiguous())
data.comm.Exscan(data, out)
# check the reduction result
# the data tensor will be contiguous after the reduction
# MPI enforces the same data type for send and receive buffer
# the reduction implementation takes care of making the internal Torch storage
# consistent
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
self.assertTrue((out._DNDarray__array == data.comm.rank).all())
def test_gather(self):
# contiguous data
data = ht.ones((1, 5), device=ht_device)
output = ht.zeros((ht.MPI_WORLD.size, 5), device=ht_device)
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
data.comm.Gather(data, output, root=0)
# check scatter result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
if data.comm.rank == 0:
self.assertTrue(
(output._DNDarray__array == torch.ones(ht.MPI_WORLD.size, 5, device=device)).all()
)
# contiguous data, different gather axis
data = ht.ones((5, 2), device=ht_device)
output = ht.zeros((5, 2 * ht.MPI_WORLD.size), device=ht_device)
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
data.comm.Gather(data, output, root=0, axis=1)
# check scatter result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
if data.comm.rank == 0:
self.assertTrue(
(
output._DNDarray__array == torch.ones(5, 2 * ht.MPI_WORLD.size, device=device)
).all()
)
# non-contiguous data
data = ht.ones((3, 5), device=ht_device).T
output = ht.zeros((5, 3 * ht.MPI_WORLD.size), device=ht_device)
# ensure prior invariants
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
data.comm.Gather(data, output, root=0)
# check scatter result
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
if data.comm.rank == 0:
self.assertTrue(
(
output._DNDarray__array == torch.ones(5, 3 * ht.MPI_WORLD.size, device=device)
).all()
)
# non-contiguous output, different gather axis
data = ht.ones((5, 3), device=ht_device)
output = ht.zeros((3 * ht.MPI_WORLD.size, 5), device=ht_device).T
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
data.comm.Gather(data, output, root=0, axis=1)
# check scatter result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
if data.comm.rank == 0:
self.assertTrue(
(
output._DNDarray__array == torch.ones(5, 3 * ht.MPI_WORLD.size, device=device)
).all()
)
def test_gatherv(self):
# contiguous data buffer, contiguous output buffer
data = ht.ones((ht.MPI_WORLD.rank + 1, 10), device=ht_device)
output_count = ht.MPI_WORLD.size * (ht.MPI_WORLD.size + 1) // 2
output = ht.zeros((output_count, 10), device=ht_device)
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
# perform the scatter operation
counts = tuple(range(1, ht.MPI_WORLD.size + 1))
displs = tuple(np.cumsum(range(ht.MPI_WORLD.size)))
data.comm.Gatherv(data, (output, counts, displs), root=0)
# check scatter result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
if data.comm.rank == 0:
self.assertTrue(
(output._DNDarray__array == torch.ones(output_count, 10, device=device)).all()
)
# non-contiguous data buffer, contiguous output buffer
data = ht.ones((10, 2 * (ht.MPI_WORLD.rank + 1)), device=ht_device).T
output_count = ht.MPI_WORLD.size * (ht.MPI_WORLD.size + 1)
output = ht.zeros((output_count, 10), device=ht_device)
# ensure prior invariants
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
# perform the scatter operation
counts = tuple(range(2, 2 * (ht.MPI_WORLD.size + 1), 2))
displs = tuple(np.cumsum(range(0, 2 * ht.MPI_WORLD.size, 2)))
data.comm.Gatherv(data, (output, counts, displs), root=0)
# check scatter result
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
if data.comm.rank == 0:
self.assertTrue(
(output._DNDarray__array == torch.ones(output_count, 10, device=device)).all()
)
# contiguous data buffer, non-contiguous output buffer
data = ht.ones((2 * (ht.MPI_WORLD.rank + 1), 10), device=ht_device)
output_count = ht.MPI_WORLD.size * (ht.MPI_WORLD.size + 1)
output = ht.zeros((10, output_count), device=ht_device).T
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
# perform the scatter operation
counts = tuple(range(2, 2 * (ht.MPI_WORLD.size + 1), 2))
displs = tuple(np.cumsum(range(0, 2 * ht.MPI_WORLD.size, 2)))
data.comm.Gatherv(data, (output, counts, displs), root=0)
# check scatter result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
if data.comm.rank == 0:
self.assertTrue(
(output._DNDarray__array == torch.ones(output_count, 10, device=device)).all()
)
# non-contiguous data buffer, non-contiguous output buffer
data = ht.ones((10, 2 * (ht.MPI_WORLD.rank + 1)), device=ht_device).T
output_count = ht.MPI_WORLD.size * (ht.MPI_WORLD.size + 1)
output = ht.zeros((10, output_count), device=ht_device).T
# ensure prior invariants
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
# perform the scatter operation
counts = tuple(range(2, 2 * (ht.MPI_WORLD.size + 1), 2))
displs = tuple(np.cumsum(range(0, 2 * ht.MPI_WORLD.size, 2)))
data.comm.Gatherv(data, (output, counts, displs), root=0)
# check scatter result
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
if data.comm.rank == 0:
self.assertTrue(
(output._DNDarray__array == torch.ones(output_count, 10, device=device)).all()
)
def test_iallgather(self):
try:
# contiguous data
data = ht.ones((1, 7), device=ht_device)
output = ht.zeros((ht.MPI_WORLD.size, 7), device=ht_device)
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
req = data.comm.Iallgather(data, output)
req.wait()
# check scatter result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
self.assertTrue(
(output._DNDarray__array == torch.ones(ht.MPI_WORLD.size, 7, device=device)).all()
)
# contiguous data, different gather axis
data = ht.ones((7, 2), dtype=ht.float64, device=ht_device)
output = ht.random.randn(7, 2 * ht.MPI_WORLD.size, device=ht_device)
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
req = data.comm.Iallgather(data, output, recv_axis=1)
req.wait()
# check scatter result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
self.assertTrue(
(
output._DNDarray__array == torch.ones(7, 2 * ht.MPI_WORLD.size, device=device)
).all()
)
# non-contiguous data
data = ht.ones((4, 5), device=ht_device).T
output = ht.zeros((5, 4 * ht.MPI_WORLD.size), device=ht_device)
# ensure prior invariants
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
req = data.comm.Iallgather(data, output)
req.wait()
# check scatter result
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
self.assertTrue(
(
output._DNDarray__array == torch.ones(5, 4 * ht.MPI_WORLD.size, device=device)
).all()
)
# non-contiguous output, different gather axis
data = ht.ones((5, 7), device=ht_device)
output = ht.zeros((7 * ht.MPI_WORLD.size, 5), device=ht_device).T
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
req = data.comm.Iallgather(data, output, recv_axis=1)
req.wait()
# check scatter result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
self.assertTrue(
(
output._DNDarray__array == torch.ones(5, 7 * ht.MPI_WORLD.size, device=device)
).all()
)
# MPI implementation may not support asynchronous operations
except NotImplementedError:
pass
def test_iallgatherv(self):
try:
# contiguous data buffer, contiguous output buffer
data = ht.ones((ht.MPI_WORLD.rank + 1, 10), device=ht_device)
output_count = ht.MPI_WORLD.size * (ht.MPI_WORLD.size + 1) // 2
output = ht.zeros((output_count, 10), device=ht_device)
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
# perform the scatter operation
counts = tuple(range(1, ht.MPI_WORLD.size + 1))
displs = tuple(np.cumsum(range(ht.MPI_WORLD.size)))
req = data.comm.Iallgatherv(data, (output, counts, displs))
req.wait()
# check scatter result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
self.assertTrue(
(output._DNDarray__array == torch.ones(output_count, 10, device=device)).all()
)
# non-contiguous data buffer, contiguous output buffer
data = ht.ones((10, 2 * (ht.MPI_WORLD.rank + 1)), device=ht_device).T
output_count = ht.MPI_WORLD.size * (ht.MPI_WORLD.size + 1)
output = ht.zeros((output_count, 10), device=ht_device)
# ensure prior invariants
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
# perform the scatter operation
counts = tuple(range(2, 2 * (ht.MPI_WORLD.size + 1), 2))
displs = tuple(np.cumsum(range(0, 2 * ht.MPI_WORLD.size, 2)))
req = data.comm.Iallgatherv(data, (output, counts, displs))
req.wait()
# check scatter result
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
self.assertTrue(
(output._DNDarray__array == torch.ones(output_count, 10, device=device)).all()
)
# contiguous data buffer, non-contiguous output buffer
data = ht.ones((2 * (ht.MPI_WORLD.rank + 1), 10), device=ht_device)
output_count = ht.MPI_WORLD.size * (ht.MPI_WORLD.size + 1)
output = ht.zeros((10, output_count), device=ht_device).T
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
# perform the scatter operation
counts = tuple(range(2, 2 * (ht.MPI_WORLD.size + 1), 2))
displs = tuple(np.cumsum(range(0, 2 * ht.MPI_WORLD.size, 2)))
req = data.comm.Iallgatherv(data, (output, counts, displs))
req.wait()
# check scatter result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
self.assertTrue(
(output._DNDarray__array == torch.ones(output_count, 10, device=device)).all()
)
# non-contiguous data buffer, non-contiguous output buffer
data = ht.ones((10, 2 * (ht.MPI_WORLD.rank + 1)), device=ht_device).T
output_count = ht.MPI_WORLD.size * (ht.MPI_WORLD.size + 1)
output = ht.zeros((10, output_count), device=ht_device).T
# ensure prior invariants
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
# perform the scatter operation
counts = tuple(range(2, 2 * (ht.MPI_WORLD.size + 1), 2))
displs = tuple(np.cumsum(range(0, 2 * ht.MPI_WORLD.size, 2)))
req = data.comm.Iallgatherv(data, (output, counts, displs))
req.wait()
# check scatter result
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
self.assertTrue(
(output._DNDarray__array == torch.ones(output_count, 10, device=device)).all()
)
# MPI implementation may not support asynchronous operations
except NotImplementedError:
pass
def test_iallreduce(self):
try:
# contiguous data
data = ht.ones((10, 2), dtype=ht.int8, device=ht_device)
out = ht.zeros_like(data, device=ht_device)
# reduce across all nodes
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
req = data.comm.Iallreduce(data, out, op=ht.MPI.SUM)
req.wait()
# check the reduction result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
self.assertTrue((out._DNDarray__array == data.comm.size).all())
# non-contiguous data
data = ht.ones((10, 2), dtype=ht.int8, device=ht_device).T
out = ht.zeros_like(data, device=ht_device)
# reduce across all nodes
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
req = data.comm.Iallreduce(data, out, op=ht.MPI.SUM)
req.wait()
# check the reduction result
# the data tensor will be contiguous after the reduction
# MPI enforces the same data type for send and receive buffer
# the reduction implementation takes care of making the internal Torch
# storage consistent
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
self.assertTrue((out._DNDarray__array == data.comm.size).all())
# non-contiguous output
data = ht.ones((10, 2), dtype=ht.int8, device=ht_device)
out = ht.zeros((2, 10), dtype=ht.int8, device=ht_device).T
# reduce across all nodes
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(out._DNDarray__array.is_contiguous())
req = data.comm.Iallreduce(data, out, op=ht.MPI.SUM)
req.wait()
# check the reduction result
# the data tensor will be contiguous after the reduction
# MPI enforces the same data type for send and receive buffer
# the reduction implementation takes care of making the internal Torch
# storage consistent
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
self.assertTrue((out._DNDarray__array == data.comm.size).all())
# MPI implementation may not support asynchronous operations
except NotImplementedError:
pass
def test_ialltoall(self):
try:
# contiguous data
data = ht.array([[ht.MPI_WORLD.rank] * 10] * ht.MPI_WORLD.size, device=ht_device)
output = ht.zeros((ht.MPI_WORLD.size, 10), dtype=ht.int64, device=ht_device)
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
req = data.comm.Ialltoall(data, output)
req.wait()
# check scatter result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
comparison = (
torch.arange(ht.MPI_WORLD.size, device=device)
.reshape(-1, 1)
.expand(ht.MPI_WORLD.size, 10)
)
self.assertTrue((output._DNDarray__array == comparison).all())
# contiguous data, different gather axis
data = ht.array([[ht.MPI_WORLD.rank] * ht.MPI_WORLD.size] * 10, device=ht_device)
output = ht.zeros((10, ht.MPI_WORLD.size), dtype=ht.int64, device=ht_device)
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
req = data.comm.Ialltoall(data, output, send_axis=1)
req.wait()
# check scatter result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
comparison = (
torch.arange(ht.MPI_WORLD.size, device=device)
.repeat(10)
.reshape(10, ht.MPI_WORLD.size)
)
self.assertTrue((output._DNDarray__array == comparison).all())
# non-contiguous data
data = ht.ones((10, 2 * ht.MPI_WORLD.size), dtype=ht.int64, device=ht_device).T
output = ht.zeros((2 * ht.MPI_WORLD.size, 10), dtype=ht.int64, device=ht_device)
# ensure prior invariants
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
req = data.comm.Ialltoall(data, output)
req.wait()
# check scatter result
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
comparison = torch.ones((2 * ht.MPI_WORLD.size, 10), dtype=torch.int64, device=device)
self.assertTrue((output._DNDarray__array == comparison).all())
# non-contiguous output, different gather axis
data = ht.ones((10, 2 * ht.MPI_WORLD.size), dtype=ht.int64, device=ht_device)
output = ht.zeros((2 * ht.MPI_WORLD.size, 10), dtype=ht.int64, device=ht_device).T
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
req = data.comm.Ialltoall(data, output, send_axis=1)
req.wait()
# check scatter result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
comparison = torch.ones((10, 2 * ht.MPI_WORLD.size), dtype=torch.int64, device=device)
self.assertTrue((output._DNDarray__array == comparison).all())
# MPI implementation may not support asynchronous operations
except NotImplementedError:
pass
def test_ialltoallv(self):
try:
# contiguous data buffer
data = ht.array([[ht.MPI_WORLD.rank] * 10] * (ht.MPI_WORLD.size + 1), device=ht_device)
send_counts, send_displs, output_shape = data.comm.counts_displs_shape(data.lshape, 0)
# contiguous output buffer
output = ht.zeros(output_shape, dtype=ht.int64, device=ht_device)
recv_counts, recv_displs, _ = data.comm.counts_displs_shape(output.lshape, 0)
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
if ht.MPI_WORLD.size != 1:
self.assertNotEqual(data.shape[0] % ht.MPI_WORLD.size, 0)
else:
self.assertEqual(data.shape[0] % ht.MPI_WORLD.size, 0)
req = data.comm.Ialltoallv(
(data, send_counts, send_displs), (output, recv_counts, recv_displs)
)
req.wait()
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
stack_count = output_shape[0] // ht.MPI_WORLD.size * 10
comparison = (
torch.arange(ht.MPI_WORLD.size, device=device)
.reshape(-1, 1)
.expand(-1, stack_count)
.reshape(-1, 10)
)
self.assertTrue((output._DNDarray__array == comparison).all())
# non-contiguous data buffer
data = ht.array(
[[ht.MPI_WORLD.rank] * (ht.MPI_WORLD.size + 1)] * 10, device=ht_device
).T
send_counts, send_displs, output_shape = data.comm.counts_displs_shape(data.lshape, 0)
# contiguous output buffer
output = ht.zeros(output_shape, dtype=ht.int64, device=ht_device)
recv_counts, recv_displs, _ = data.comm.counts_displs_shape(output.lshape, 0)
# ensure prior invariants
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
if ht.MPI_WORLD.size != 1:
self.assertNotEqual(data.shape[0] % ht.MPI_WORLD.size, 0)
else:
self.assertEqual(data.shape[0] % ht.MPI_WORLD.size, 0)
req = data.comm.Ialltoallv(
(data, send_counts, send_displs), (output, recv_counts, recv_displs)
)
req.wait()
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
stack_count = output_shape[0] // ht.MPI_WORLD.size * 10
comparison = (
torch.arange(ht.MPI_WORLD.size, device=device)
.reshape(-1, 1)
.expand(-1, stack_count)
.reshape(-1, 10)
)
self.assertTrue((output._DNDarray__array == comparison).all())
# contiguous data buffer
data = ht.array([[ht.MPI_WORLD.rank] * 10] * (ht.MPI_WORLD.size + 1), device=ht_device)
send_counts, send_displs, output_shape = data.comm.counts_displs_shape(data.lshape, 0)
# non-contiguous output buffer
output_shape = tuple(reversed(output_shape))
output = ht.zeros(output_shape, dtype=ht.int64, device=ht_device).T
recv_counts, recv_displs, _ = data.comm.counts_displs_shape(output.lshape, 0)
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
if ht.MPI_WORLD.size != 1:
self.assertNotEqual(data.shape[0] % ht.MPI_WORLD.size, 0)
else:
self.assertEqual(data.shape[0] % ht.MPI_WORLD.size, 0)
req = data.comm.Ialltoallv(
(data, send_counts, send_displs), (output, recv_counts, recv_displs)
)
req.wait()
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
stack_count = output_shape[1] // ht.MPI_WORLD.size * 10
comparison = (
torch.arange(ht.MPI_WORLD.size, device=device)
.reshape(-1, 1)
.expand(-1, stack_count)
.reshape(-1, 10)
)
self.assertTrue((output._DNDarray__array == comparison).all())
# non-contiguous data buffer
data = ht.array(
[[ht.MPI_WORLD.rank] * (ht.MPI_WORLD.size + 1)] * 10, device=ht_device
).T
send_counts, send_displs, output_shape = data.comm.counts_displs_shape(data.lshape, 0)
# non-contiguous output buffer
output_shape = tuple(reversed(output_shape))
output = ht.zeros(output_shape, dtype=ht.int64, device=ht_device).T
recv_counts, recv_displs, _ = data.comm.counts_displs_shape(output.lshape, 0)
# ensure prior invariants
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
if ht.MPI_WORLD.size != 1:
self.assertNotEqual(data.shape[0] % ht.MPI_WORLD.size, 0)
else:
self.assertEqual(data.shape[0] % ht.MPI_WORLD.size, 0)
req = data.comm.Ialltoallv(
(data, send_counts, send_displs), (output, recv_counts, recv_displs)
)
req.wait()
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
stack_count = output_shape[1] // ht.MPI_WORLD.size * 10
comparison = (
torch.arange(ht.MPI_WORLD.size, device=device)
.reshape(-1, 1)
.expand(-1, stack_count)
.reshape(-1, 10)
)
self.assertTrue((output._DNDarray__array == comparison).all())
# MPI implementation may not support asynchronous operations
except NotImplementedError:
pass
def test_ibcast(self):
try:
# contiguous data
data = ht.arange(10, dtype=ht.int64, device=ht_device)
if ht.MPI_WORLD.rank != 0:
data = ht.zeros_like(data, dtype=ht.int64, device=ht_device)
# broadcast data to all nodes
self.assertTrue(data._DNDarray__array.is_contiguous())
req = data.comm.Ibcast(data, root=0)
req.wait()
# assert output is equal
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue((data._DNDarray__array == torch.arange(10, device=device)).all())
# non-contiguous data
data = ht.ones((2, 5), dtype=ht.float32, device=ht_device).T
if ht.MPI_WORLD.rank != 0:
data = ht.zeros((2, 5), dtype=ht.float32, device=ht_device).T
# broadcast data to all nodes
self.assertFalse(data._DNDarray__array.is_contiguous())
req = data.comm.Ibcast(data, root=0)
req.wait()
# assert output is equal
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertTrue(
(
data._DNDarray__array == torch.ones((5, 2), dtype=torch.float32, device=device)
).all()
)
# MPI implementation may not support asynchronous operations
except NotImplementedError:
pass
def test_iexscan(self):
try:
# contiguous data
data = ht.ones((5, 3), dtype=ht.int64, device=ht_device)
out = ht.zeros_like(data, device=ht_device)
# reduce across all nodes
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
req = data.comm.Iexscan(data, out)
req.wait()
# check the reduction result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
self.assertTrue((out._DNDarray__array == data.comm.rank).all())
# non-contiguous data
data = ht.ones((5, 3), dtype=ht.int64, device=ht_device).T
out = ht.zeros_like(data)
# reduce across all nodes
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
req = data.comm.Iexscan(data, out)
req.wait()
# check the reduction result
# the data tensor will be contiguous after the reduction
# MPI enforces the same data type for send and receive buffer
# the reduction implementation takes care of making the internal Torch
# storage consistent
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
self.assertTrue((out._DNDarray__array == data.comm.rank).all())
# non-contiguous output
data = ht.ones((5, 3), dtype=ht.int64, device=ht_device)
out = ht.zeros((3, 5), dtype=ht.int64, device=ht_device).T
# reduce across all nodes
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(out._DNDarray__array.is_contiguous())
req = data.comm.Iexscan(data, out)
req.wait()
# check the reduction result
# the data tensor will be contiguous after the reduction
# MPI enforces the same data type for send and receive buffer
# the reduction implementation takes care of making the internal Torch
# storage consistent
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
self.assertTrue((out._DNDarray__array == data.comm.rank).all())
# MPI implementation may not support asynchronous operations
except NotImplementedError:
pass
def test_igather(self):
try:
# contiguous data
data = ht.ones((1, 5), dtype=ht.float64, device=ht_device)
output = ht.random.randn(ht.MPI_WORLD.size, 5, device=ht_device)
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
req = data.comm.Igather(data, output, root=0)
req.wait()
# check gather result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
if data.comm.rank == 0:
self.assertTrue(
(
output._DNDarray__array
== torch.ones((ht.MPI_WORLD.size, 5), dtype=torch.float32, device=device)
).all()
)
# contiguous data, different gather axis
data = ht.ones((5, 2), dtype=ht.float64, device=ht_device)
output = ht.random.randn(5, 2 * ht.MPI_WORLD.size, device=ht_device)
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
req = data.comm.Igather(data, output, root=0, axis=1)
req.wait()
# check gather result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
if data.comm.rank == 0:
self.assertTrue(
(
output._DNDarray__array
== torch.ones(
(5, 2 * ht.MPI_WORLD.size), dtype=torch.float32, device=device
)
).all()
)
# non-contiguous data
data = ht.ones((3, 5), dtype=ht.float64, device=ht_device).T
output = ht.random.randn(5, 3 * ht.MPI_WORLD.size, device=ht_device)
# ensure prior invariants
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
req = data.comm.Igather(data, output, root=0)
req.wait()
# check gather result
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
if data.comm.rank == 0:
self.assertTrue(
(
output._DNDarray__array
== torch.ones(
(5, 3 * ht.MPI_WORLD.size), dtype=torch.float32, device=device
)
).all()
)
# non-contiguous output, different gather axis
data = ht.ones((5, 3), dtype=ht.float64, device=ht_device)
output = ht.random.randn(3 * ht.MPI_WORLD.size, 5, device=ht_device).T
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
req = data.comm.Igather(data, output, root=0, axis=1)
req.wait()
# check gather result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
if data.comm.rank == 0:
self.assertTrue(
(
output._DNDarray__array
== torch.ones(
(5, 3 * ht.MPI_WORLD.size), dtype=torch.float32, device=device
)
).all()
)
# MPI implementation may not support asynchronous operations
except NotImplementedError:
pass
def test_igatherv(self):
try:
# contiguous data buffer, contiguous output buffer
data = ht.ones((ht.MPI_WORLD.rank + 1, 10), device=ht_device)
output_count = ht.MPI_WORLD.size * (ht.MPI_WORLD.size + 1) // 2
output = ht.zeros((output_count, 10), device=ht_device)
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
# perform the gather operation
counts = tuple(range(1, ht.MPI_WORLD.size + 1))
displs = tuple(np.cumsum(range(ht.MPI_WORLD.size)))
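# illustrative values (editor's note): with 4 ranks, counts = (1, 2, 3, 4) and
# displs = (0, 1, 3, 6), i.e. rank r contributes r + 1 rows placed at the prefix
# sum of the preceding counts, which together fill the output_count = 10 rows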
req = data.comm.Igatherv(data, (output, counts, displs), root=0)
req.wait()
# check gather result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
if data.comm.rank == 0:
self.assertTrue(
(output._DNDarray__array == torch.ones(output_count, 10, device=device)).all()
)
# non-contiguous data buffer, contiguous output buffer
data = ht.ones((10, 2 * (ht.MPI_WORLD.rank + 1)), device=ht_device).T
output_count = ht.MPI_WORLD.size * (ht.MPI_WORLD.size + 1)
output = ht.zeros((output_count, 10), device=ht_device)
# ensure prior invariants
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
# perform the gather operation
counts = tuple(range(2, 2 * (ht.MPI_WORLD.size + 1), 2))
displs = tuple(np.cumsum(range(0, 2 * ht.MPI_WORLD.size, 2)))
req = data.comm.Igatherv(data, (output, counts, displs), root=0)
req.wait()
# check gather result
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
if data.comm.rank == 0:
self.assertTrue(
(output._DNDarray__array == torch.ones(output_count, 10, device=device)).all()
)
# contiguous data buffer, non-contiguous output buffer
data = ht.ones((2 * (ht.MPI_WORLD.rank + 1), 10), device=ht_device)
output_count = ht.MPI_WORLD.size * (ht.MPI_WORLD.size + 1)
output = ht.zeros((10, output_count), device=ht_device).T
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
# perform the gather operation
counts = tuple(range(2, 2 * (ht.MPI_WORLD.size + 1), 2))
displs = tuple(np.cumsum(range(0, 2 * ht.MPI_WORLD.size, 2)))
req = data.comm.Igatherv(data, (output, counts, displs), root=0)
req.wait()
# check gather result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
if data.comm.rank == 0:
self.assertTrue(
(output._DNDarray__array == torch.ones(output_count, 10, device=device)).all()
)
# non-contiguous data buffer, non-contiguous output buffer
data = ht.ones((10, 2 * (ht.MPI_WORLD.rank + 1)), device=ht_device).T
output_count = ht.MPI_WORLD.size * (ht.MPI_WORLD.size + 1)
output = ht.zeros((10, output_count), device=ht_device).T
# ensure prior invariants
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
# perform the gather operation
counts = tuple(range(2, 2 * (ht.MPI_WORLD.size + 1), 2))
displs = tuple(np.cumsum(range(0, 2 * ht.MPI_WORLD.size, 2)))
req = data.comm.Igatherv(data, (output, counts, displs), root=0)
req.wait()
# check gather result
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
if data.comm.rank == 0:
self.assertTrue(
(output._DNDarray__array == torch.ones(output_count, 10, device=device)).all()
)
# MPI implementation may not support asynchronous operations
except NotImplementedError:
pass
def test_ireduce(self):
try:
# contiguous data
data = ht.ones((10, 2), dtype=ht.int32, device=ht_device)
out = ht.zeros_like(data, device=ht_device)
# reduce across all nodes
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
req = data.comm.Ireduce(data, out, op=ht.MPI.SUM, root=0)
req.wait()
# check the reduction result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
if data.comm.rank == 0:
self.assertTrue((out._DNDarray__array == data.comm.size).all())
# non-contiguous data
data = ht.ones((10, 2), dtype=ht.int32, device=ht_device).T
out = ht.zeros_like(data, device=ht_device)
# reduce across all nodes
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
req = data.comm.Ireduce(data, out, op=ht.MPI.SUM, root=0)
req.wait()
# check the reduction result
# the data tensor will be contiguous after the reduction
# MPI enforces the same data type for send and receive buffer
# the reduction implementation takes care of making the internal Torch
# storage consistent
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
if data.comm.rank == 0:
self.assertTrue((out._DNDarray__array == data.comm.size).all())
# non-contiguous output
data = ht.ones((10, 2), dtype=ht.int32, device=ht_device)
out = ht.zeros((2, 10), dtype=ht.int32, device=ht_device).T
# reduce across all nodes
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(out._DNDarray__array.is_contiguous())
req = data.comm.Ireduce(data, out, op=ht.MPI.SUM, root=0)
req.wait()
# check the reduction result
# the data tensor will be contiguous after the reduction
# MPI enforces the same data type for send and receive buffer
# the reduction implementation takes care of making the internal Torch
# storage consistent
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
if data.comm.rank == 0:
self.assertTrue((out._DNDarray__array == data.comm.size).all())
# MPI implementation may not support asynchronous operations
except NotImplementedError:
pass
def test_iscan(self):
try:
# contiguous data
data = ht.ones((5, 3), dtype=ht.float64, device=ht_device)
out = ht.zeros_like(data, device=ht_device)
# reduce across all nodes
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
req = data.comm.Iscan(data, out)
req.wait()
# check the reduction result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
self.assertTrue((out._DNDarray__array == data.comm.rank + 1).all())
# non-contiguous data
data = ht.ones((5, 3), dtype=ht.float64, device=ht_device).T
out = ht.zeros_like(data, device=ht_device)
# reduce across all nodes
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
req = data.comm.Iscan(data, out)
req.wait()
# check the reduction result
# the data tensor will be contiguous after the reduction
# MPI enforces the same data type for send and receive buffer
# the reduction implementation takes care of making the internal Torch
# storage consistent
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
self.assertTrue((out._DNDarray__array == data.comm.rank + 1).all())
# non-contiguous output
data = ht.ones((5, 3), dtype=ht.float64, device=ht_device)
out = ht.zeros((3, 5), dtype=ht.float64, device=ht_device).T
# reduce across all nodes
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(out._DNDarray__array.is_contiguous())
req = data.comm.Iscan(data, out)
req.wait()
# check the reduction result
# the data tensor will be contiguous after the reduction
# MPI enforces the same data type for send and receive buffer
# the reduction implementation takes care of making the internal Torch
# storage consistent
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
self.assertTrue((out._DNDarray__array == data.comm.rank + 1).all())
# MPI implementation may not support asynchronous operations
except NotImplementedError:
pass
def test_iscatter(self):
try:
# contiguous data
if ht.MPI_WORLD.rank == 0:
data = ht.ones((ht.MPI_WORLD.size, 5), device=ht_device)
else:
data = ht.zeros((1,), device=ht_device)
output = ht.zeros((1, 5), device=ht_device)
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
req = data.comm.Iscatter(data, output, root=0)
req.wait()
# check scatter result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
self.assertTrue((output._DNDarray__array == torch.ones(1, 5, device=device)).all())
# contiguous data, different scatter axis
if ht.MPI_WORLD.rank == 0:
data = ht.ones((5, ht.MPI_WORLD.size), device=ht_device)
else:
data = ht.zeros((1,), device=ht_device)
output = ht.zeros((5, 1), device=ht_device)
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
req = data.comm.Iscatter(data, output, root=0, axis=1)
req.wait()
# check scatter result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
self.assertTrue((output._DNDarray__array == torch.ones(5, 1, device=device)).all())
# non-contiguous data
if ht.MPI_WORLD.rank == 0:
data = ht.ones((5, ht.MPI_WORLD.size * 2), device=ht_device).T
self.assertFalse(data._DNDarray__array.is_contiguous())
else:
data = ht.zeros((1,), device=ht_device)
self.assertTrue(data._DNDarray__array.is_contiguous())
output = ht.zeros((2, 5), device=ht_device)
# ensure prior invariants
self.assertTrue(output._DNDarray__array.is_contiguous())
req = data.comm.Iscatter(data, output, root=0)
req.wait()
# check scatter result
if ht.MPI_WORLD.rank == 0:
self.assertFalse(data._DNDarray__array.is_contiguous())
else:
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
self.assertTrue((output._DNDarray__array == torch.ones(2, 5, device=device)).all())
# non-contiguous destination, different split axis
if ht.MPI_WORLD.rank == 0:
data = ht.ones((5, ht.MPI_WORLD.size * 2), device=ht_device)
else:
data = ht.zeros((1,), device=ht_device)
output = ht.zeros((2, 5), device=ht_device).T
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
req = data.comm.Iscatter(data, output, root=0, axis=1)
req.wait()
# check scatter result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
self.assertTrue((output._DNDarray__array == torch.ones(5, 2, device=device)).all())
# MPI implementation may not support asynchronous operations
except NotImplementedError:
pass
def test_iscatterv(self):
try:
# contiguous data buffer, contiguous output buffer
input_count = ht.MPI_WORLD.size * (ht.MPI_WORLD.size + 1)
data = ht.ones((input_count, 12), device=ht_device)
output_count = 2 * (ht.MPI_WORLD.rank + 1)
output = ht.zeros((output_count, 12), device=ht_device)
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
# perform the scatter operation
counts = tuple(range(2, 2 * (ht.MPI_WORLD.size + 1), 2))
displs = tuple(np.cumsum(range(0, 2 * ht.MPI_WORLD.size, 2)))
req = data.comm.Iscatterv((data, counts, displs), output, root=0)
req.wait()
# check scatter result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
self.assertTrue(
(output._DNDarray__array == torch.ones(output_count, 12, device=device)).all()
)
# non-contiguous data buffer, contiguous output buffer
input_count = ht.MPI_WORLD.size * (ht.MPI_WORLD.size + 1)
data = ht.ones((12, input_count), device=ht_device).T
output_count = 2 * (ht.MPI_WORLD.rank + 1)
output = ht.zeros((output_count, 12), device=ht_device)
# ensure prior invariants
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
# perform the scatter operation
counts = tuple(range(2, 2 * (ht.MPI_WORLD.size + 1), 2))
displs = tuple(np.cumsum(range(0, 2 * ht.MPI_WORLD.size, 2)))
req = data.comm.Iscatterv((data, counts, displs), output, root=0)
req.wait()
# check scatter result
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
self.assertTrue(
(output._DNDarray__array == torch.ones(output_count, 12, device=device)).all()
)
# contiguous data buffer, non-contiguous output buffer
input_count = ht.MPI_WORLD.size * (ht.MPI_WORLD.size + 1)
data = ht.ones((input_count, 12), device=ht_device)
output_count = 2 * (ht.MPI_WORLD.rank + 1)
output = ht.zeros((12, output_count), device=ht_device).T
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
# perform the scatter operation
counts = tuple(range(2, 2 * (ht.MPI_WORLD.size + 1), 2))
displs = tuple(np.cumsum(range(0, 2 * ht.MPI_WORLD.size, 2)))
req = data.comm.Iscatterv((data, counts, displs), output, root=0)
req.wait()
# check scatter result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
self.assertTrue(
(output._DNDarray__array == torch.ones(output_count, 12, device=device)).all()
)
# non-contiguous data buffer, non-contiguous output buffer
input_count = ht.MPI_WORLD.size * (ht.MPI_WORLD.size + 1)
data = ht.ones((12, input_count), device=ht_device).T
output_count = 2 * (ht.MPI_WORLD.rank + 1)
output = ht.zeros((12, output_count), device=ht_device).T
# ensure prior invariants
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
# perform the scatter operation
counts = tuple(range(2, 2 * (ht.MPI_WORLD.size + 1), 2))
displs = tuple(np.cumsum(range(0, 2 * ht.MPI_WORLD.size, 2)))
req = data.comm.Iscatterv((data, counts, displs), output, root=0)
req.wait()
# check scatter result
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
self.assertTrue(
(output._DNDarray__array == torch.ones(output_count, 12, device=device)).all()
)
# MPI implementation may not support asynchronous operations
except NotImplementedError:
pass
def test_mpi_in_place(self):
size = ht.MPI_WORLD.size
data = ht.ones((size, size), dtype=ht.int32, device=ht_device)
data.comm.Allreduce(ht.MPI.IN_PLACE, data, op=ht.MPI.SUM)
self.assertTrue((data._DNDarray__array == size).all())
# MPI Inplace is not allowed for AllToAll
def test_reduce(self):
# contiguous data
data = ht.ones((10, 2), dtype=ht.int32, device=ht_device)
out = ht.zeros_like(data, device=ht_device)
# reduce across all nodes
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
data.comm.Reduce(data, out, op=ht.MPI.SUM, root=0)
# check the reduction result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
if data.comm.rank == 0:
self.assertTrue((out._DNDarray__array == data.comm.size).all())
# non-contiguous data
data = ht.ones((10, 2), dtype=ht.int32, device=ht_device).T
out = ht.zeros_like(data, device=ht_device)
# reduce across all nodes
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
data.comm.Reduce(data, out, op=ht.MPI.SUM, root=0)
# check the reduction result
# the data tensor will be contiguous after the reduction
# MPI enforces the same data type for send and receive buffer
# the reduction implementation takes care of making the internal Torch
# storage consistent
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
if data.comm.rank == 0:
self.assertTrue((out._DNDarray__array == data.comm.size).all())
# non-contiguous output
data = ht.ones((10, 2), dtype=ht.int32, device=ht_device)
out = ht.zeros((2, 10), dtype=ht.int32, device=ht_device).T
# reduce across all nodes
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(out._DNDarray__array.is_contiguous())
data.comm.Reduce(data, out, op=ht.MPI.SUM, root=0)
# check the reduction result
# the data tensor will be contiguous after the reduction
# MPI enforces the same data type for send and receive buffer
# the reduction implementation takes care of making the internal Torch storage
# consistent
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
if data.comm.rank == 0:
self.assertTrue((out._DNDarray__array == data.comm.size).all())
def test_scan(self):
# contiguous data
data = ht.ones((5, 3), dtype=ht.float64, device=ht_device)
out = ht.zeros_like(data, device=ht_device)
# reduce across all nodes
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
data.comm.Scan(data, out)
# check the reduction result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
self.assertTrue((out._DNDarray__array == data.comm.rank + 1).all())
# non-contiguous data
data = ht.ones((5, 3), dtype=ht.float64, device=ht_device).T
out = ht.zeros_like(data, device=ht_device)
# reduce across all nodes
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
data.comm.Scan(data, out)
# check the reduction result
# the data tensor will be contiguous after the reduction
# MPI enforces the same data type for send and receive buffer
# the reduction implementation takes care of making the internal Torch storage
# consistent
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
self.assertTrue((out._DNDarray__array == data.comm.rank + 1).all())
# non-contiguous output
data = ht.ones((5, 3), dtype=ht.float64, device=ht_device)
out = ht.zeros((3, 5), dtype=ht.float64, device=ht_device).T
# reduce across all nodes
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(out._DNDarray__array.is_contiguous())
data.comm.Scan(data, out)
# check the reduction result
# the data tensor will be contiguous after the reduction
# MPI enforces the same data type for send and receive buffer
# the reduction implementation takes care of making the internal Torch storage
# consistent
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(out._DNDarray__array.is_contiguous())
self.assertTrue((out._DNDarray__array == data.comm.rank + 1).all())
def test_scatter(self):
# contiguous data
if ht.MPI_WORLD.rank == 0:
data = ht.ones((ht.MPI_WORLD.size, 5), device=ht_device)
else:
data = ht.zeros((1,), device=ht_device)
output = ht.zeros((1, 5), device=ht_device)
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
data.comm.Scatter(data, output, root=0)
# check scatter result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
self.assertTrue((output._DNDarray__array == torch.ones(1, 5, device=device)).all())
# contiguous data, different scatter axis
if ht.MPI_WORLD.rank == 0:
data = ht.ones((5, ht.MPI_WORLD.size), device=ht_device)
else:
data = ht.zeros((1,), device=ht_device)
output = ht.zeros((5, 1), device=ht_device)
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
data.comm.Scatter(data, output, root=0, axis=1)
# check scatter result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
self.assertTrue((output._DNDarray__array == torch.ones(5, 1, device=device)).all())
# non-contiguous data
if ht.MPI_WORLD.rank == 0:
data = ht.ones((5, ht.MPI_WORLD.size * 2), device=ht_device).T
self.assertFalse(data._DNDarray__array.is_contiguous())
else:
data = ht.zeros((1,), device=ht_device)
self.assertTrue(data._DNDarray__array.is_contiguous())
output = ht.zeros((2, 5), device=ht_device)
# ensure prior invariants
self.assertTrue(output._DNDarray__array.is_contiguous())
data.comm.Scatter(data, output, root=0)
# check scatter result
if ht.MPI_WORLD.rank == 0:
self.assertFalse(data._DNDarray__array.is_contiguous())
else:
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
self.assertTrue((output._DNDarray__array == torch.ones(2, 5, device=device)).all())
# non-contiguous destination, different split axis
if ht.MPI_WORLD.rank == 0:
data = ht.ones((5, ht.MPI_WORLD.size * 2), device=ht_device)
else:
data = ht.zeros((1,), device=ht_device)
output = ht.zeros((2, 5), device=ht_device).T
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
data.comm.Scatter(data, output, root=0, axis=1)
# check scatter result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
self.assertTrue((output._DNDarray__array == torch.ones(5, 2, device=device)).all())
def test_scatter_like_axes(self):
# input and output are not split
data = ht.array(
[[ht.MPI_WORLD.rank] * ht.MPI_WORLD.size] * ht.MPI_WORLD.size, device=ht_device
)
output = ht.zeros_like(data, device=ht_device)
# main axis send buffer, main axis receive buffer
data.comm.Alltoall(data, output, send_axis=0)
comparison = (
torch.arange(ht.MPI_WORLD.size, device=device)
.reshape(-1, 1)
.repeat(1, ht.MPI_WORLD.size)
)
self.assertTrue((output._DNDarray__array == comparison).all())
# minor axis send buffer, main axis receive buffer
data.comm.Alltoall(data, output, send_axis=1)
comparison = (
torch.arange(ht.MPI_WORLD.size, device=device)
.reshape(1, -1)
.repeat(ht.MPI_WORLD.size, 1)
)
self.assertTrue((output._DNDarray__array == comparison).all())
# main axis send buffer, minor axis receive buffer
data = ht.array(
[[ht.MPI_WORLD.rank] * (2 * ht.MPI_WORLD.size)] * ht.MPI_WORLD.size, device=ht_device
)
output = ht.zeros(
(2 * ht.MPI_WORLD.size, ht.MPI_WORLD.size), dtype=data.dtype, device=ht_device
)
data.comm.Alltoall(data, output, send_axis=0, recv_axis=1)
comparison = (
torch.arange(ht.MPI_WORLD.size, device=device)
.reshape(1, -1)
.repeat(2 * ht.MPI_WORLD.size, 1)
)
self.assertTrue((output._DNDarray__array == comparison).all())
# minor axis send buffer, minor axis receive buffer
data = ht.array([range(ht.MPI_WORLD.size)] * ht.MPI_WORLD.size, device=ht_device)
output = ht.zeros(
(ht.MPI_WORLD.size, ht.MPI_WORLD.size), dtype=data.dtype, device=ht_device
)
data.comm.Alltoall(data, output, send_axis=0, recv_axis=1)
comparison = (
torch.arange(ht.MPI_WORLD.size, device=device)
.reshape(-1, 1)
.repeat(1, ht.MPI_WORLD.size)
)
self.assertTrue((output._DNDarray__array == comparison).all())
def test_scatterv(self):
# contiguous data buffer, contiguous output buffer
input_count = ht.MPI_WORLD.size * (ht.MPI_WORLD.size + 1)
data = ht.ones((input_count, 12), device=ht_device)
output_count = 2 * (ht.MPI_WORLD.rank + 1)
output = ht.zeros((output_count, 12), device=ht_device)
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
# perform the scatter operation
counts = tuple(range(2, 2 * (ht.MPI_WORLD.size + 1), 2))
displs = tuple(np.cumsum(range(0, 2 * ht.MPI_WORLD.size, 2)))
data.comm.Scatterv((data, counts, displs), output, root=0)
# check scatter result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
self.assertTrue(
(output._DNDarray__array == torch.ones(output_count, 12, device=device)).all()
)
# non-contiguous data buffer, contiguous output buffer
input_count = ht.MPI_WORLD.size * (ht.MPI_WORLD.size + 1)
data = ht.ones((12, input_count), device=ht_device).T
output_count = 2 * (ht.MPI_WORLD.rank + 1)
output = ht.zeros((output_count, 12), device=ht_device)
# ensure prior invariants
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
# perform the scatter operation
counts = tuple(range(2, 2 * (ht.MPI_WORLD.size + 1), 2))
displs = tuple(np.cumsum(range(0, 2 * ht.MPI_WORLD.size, 2)))
data.comm.Scatterv((data, counts, displs), output, root=0)
# check scatter result
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertTrue(output._DNDarray__array.is_contiguous())
self.assertTrue(
(output._DNDarray__array == torch.ones(output_count, 12, device=device)).all()
)
# contiguous data buffer, non-contiguous output buffer
input_count = ht.MPI_WORLD.size * (ht.MPI_WORLD.size + 1)
data = ht.ones((input_count, 12), device=ht_device)
output_count = 2 * (ht.MPI_WORLD.rank + 1)
output = ht.zeros((12, output_count), device=ht_device).T
# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
# perform the scatter operation
counts = tuple(range(2, 2 * (ht.MPI_WORLD.size + 1), 2))
displs = tuple(np.cumsum(range(0, 2 * ht.MPI_WORLD.size, 2)))
data.comm.Scatterv((data, counts, displs), output, root=0)
# check scatter result
self.assertTrue(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
self.assertTrue(
(output._DNDarray__array == torch.ones(output_count, 12, device=device)).all()
)
# non-contiguous data buffer, non-contiguous output buffer
input_count = ht.MPI_WORLD.size * (ht.MPI_WORLD.size + 1)
data = ht.ones((12, input_count), device=ht_device).T
output_count = 2 * (ht.MPI_WORLD.rank + 1)
output = ht.zeros((12, output_count), device=ht_device).T
# ensure prior invariants
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
# perform the scatter operation
counts = tuple(range(2, 2 * (ht.MPI_WORLD.size + 1), 2))
displs = tuple(np.cumsum(range(0, 2 * ht.MPI_WORLD.size, 2)))
data.comm.Scatterv((data, counts, displs), output, root=0)
# check scatter result
self.assertFalse(data._DNDarray__array.is_contiguous())
self.assertFalse(output._DNDarray__array.is_contiguous())
self.assertTrue(
(output._DNDarray__array == torch.ones(output_count, 12, device=device)).all()
)
def test_allgathervSorting(self):
test1 = self.sorted3Dtensor.copy()
test2 = self.sorted3Dtensor.copy()
test3 = self.sorted3Dtensor.copy()
result = self.sorted3Dtensor.copy()
test1.resplit_(axis=0)
test2.resplit_(axis=1)
test3.resplit_(axis=2)
gathered1_counts, gathered1_displs, _ = test1.comm.counts_displs_shape(
test1.shape, test1.split
)
gathered1 = torch.empty(self.sorted3Dtensor.shape, device=device)
gathered2_counts, gathered2_displs, _ = test2.comm.counts_displs_shape(
test2.shape, test2.split
)
gathered2 = torch.empty(self.sorted3Dtensor.shape, device=device)
gathered3_counts, gathered3_displs, _ = test3.comm.counts_displs_shape(
test3.shape, test3.split
)
gathered3 = torch.empty(self.sorted3Dtensor.shape, device=device)
test1.comm.Allgatherv(
test1, (gathered1, gathered1_counts, gathered1_displs), recv_axis=test1.split
)
self.assertTrue(torch.equal(gathered1, result._DNDarray__array))
test2.comm.Allgatherv(
test2, (gathered2, gathered2_counts, gathered2_displs), recv_axis=test2.split
)
self.assertTrue(torch.equal(gathered2, result._DNDarray__array))
test3.comm.Allgatherv(
test3, (gathered3, gathered3_counts, gathered3_displs), recv_axis=test3.split
)
self.assertTrue(torch.equal(gathered3, result._DNDarray__array))
def test_alltoallSorting(self):
test1 = self.sorted3Dtensor.copy()
test1.resplit_(axis=2)
comparison1 = self.sorted3Dtensor.copy()
comparison1.resplit_(axis=1)
redistributed1 = torch.empty(
comparison1.lshape, dtype=test1.dtype.torch_type(), device=device
)
test1.comm.Alltoallv(
test1._DNDarray__array,
redistributed1,
send_axis=comparison1.split,
recv_axis=test1.split,
)
self.assertTrue(torch.equal(redistributed1, comparison1._DNDarray__array))
test2 = self.sorted3Dtensor.copy()
test2.resplit_(axis=1)
comparison2 = self.sorted3Dtensor.copy()
comparison2.resplit_(axis=0)
send_counts, send_displs, _ = test2.comm.counts_displs_shape(
test2.lshape, comparison2.split
)
recv_counts, recv_displs, _ = test2.comm.counts_displs_shape(test2.shape, test2.split)
redistributed2 = torch.empty(
comparison2.lshape, dtype=test2.dtype.torch_type(), device=device
)
test2.comm.Alltoallv(
(test2._DNDarray__array, send_counts, send_displs),
(redistributed2, recv_counts, recv_displs),
send_axis=comparison2.split,
recv_axis=test2.split,
)
self.assertTrue(torch.equal(redistributed2, comparison2._DNDarray__array))
test3 = self.sorted3Dtensor.copy()
test3.resplit_(axis=0)
comparison3 = self.sorted3Dtensor.copy()
comparison3.resplit_(axis=2)
redistributed3 = torch.empty(
comparison3.lshape, dtype=test3.dtype.torch_type(), device=device
)
test3.comm.Alltoallv(
test3._DNDarray__array,
redistributed3,
send_axis=comparison3.split,
recv_axis=test3.split,
)
self.assertTrue(torch.equal(redistributed3, comparison3._DNDarray__array))
test4 = self.sorted3Dtensor.copy()
test4.resplit_(axis=2)
comparison4 = self.sorted3Dtensor.copy()
comparison4.resplit_(axis=0)
redistributed4 = torch.empty(
comparison4.lshape, dtype=test4.dtype.torch_type(), device=device
)
test4.comm.Alltoallv(
test4._DNDarray__array,
redistributed4,
send_axis=comparison4.split,
recv_axis=test4.split,
)
self.assertTrue(torch.equal(redistributed4, comparison4._DNDarray__array))
with self.assertRaises(NotImplementedError):
test4.comm.Alltoallv(test4._DNDarray__array, redistributed4, send_axis=2, recv_axis=2)
with self.assertRaises(NotImplementedError):
test4.comm.Alltoallv(test4._DNDarray__array, redistributed4, send_axis=None)
| [
"torch.arange",
"torch.equal",
"torch.ones",
"torch.cuda.set_device",
"torch.cuda.is_available",
"torch.tensor",
"torch.zeros_like",
"torch.empty"
] | 1.3.1 | bhagemeier/heat | b362b61a558c4a69cd9a884051b5efcc74f494da |
1.9 | """
Copyright (c) 2019-present NAVER Corp.
MIT License
"""
from collections import namedtuple
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torchvision import models
from torchvision.models.vgg import model_urls
def init_weights(modules):
for m in modules:
if isinstance(m, nn.Conv2d):
init.xavier_uniform_(m.weight.data)
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
class vgg16_bn(torch.nn.Module):
def __init__(self, pretrained=True, freeze=True):
super(vgg16_bn, self).__init__()
model_urls["vgg16_bn"] = model_urls["vgg16_bn"].replace("https://", "http://")
vgg_pretrained_features = models.vgg16_bn(pretrained=pretrained).features
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
self.slice5 = torch.nn.Sequential()
for x in range(12): # conv2_2
self.slice1.add_module(str(x), vgg_pretrained_features[x])
for x in range(12, 19): # conv3_3
self.slice2.add_module(str(x), vgg_pretrained_features[x])
for x in range(19, 29): # conv4_3
self.slice3.add_module(str(x), vgg_pretrained_features[x])
for x in range(29, 39): # conv5_3
self.slice4.add_module(str(x), vgg_pretrained_features[x])
# fc6, fc7 without atrous conv
self.slice5 = torch.nn.Sequential(
nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6),
nn.Conv2d(1024, 1024, kernel_size=1),
)
if not pretrained:
init_weights(self.slice1.modules())
init_weights(self.slice2.modules())
init_weights(self.slice3.modules())
init_weights(self.slice4.modules())
init_weights(self.slice5.modules()) # no pretrained model for fc6 and fc7
if freeze:
for param in self.slice1.parameters(): # only first conv
param.requires_grad = False
def forward(self, X):
h = self.slice1(X)
h_relu2_2 = h
h = self.slice2(h)
h_relu3_2 = h
h = self.slice3(h)
h_relu4_3 = h
h = self.slice4(h)
h_relu5_3 = h
h = self.slice5(h)
h_fc7 = h
vgg_outputs = namedtuple(
"VggOutputs", ["fc7", "relu5_3", "relu4_3", "relu3_2", "relu2_2"]
)
out = vgg_outputs(h_fc7, h_relu5_3, h_relu4_3, h_relu3_2, h_relu2_2)
return out
class double_conv(nn.Module):
def __init__(self, in_ch, mid_ch, out_ch):
super(double_conv, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_ch + mid_ch, mid_ch, kernel_size=1),
nn.BatchNorm2d(mid_ch),
nn.ReLU(inplace=True),
nn.Conv2d(mid_ch, out_ch, kernel_size=3, padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True),
)
def forward(self, x):
x = self.conv(x)
return x
class CRAFT(nn.Module):
def __init__(self, pretrained=False, freeze=False):
super(CRAFT, self).__init__()
""" Base network """
self.basenet = vgg16_bn(pretrained, freeze)
""" U network """
self.upconv1 = double_conv(1024, 512, 256)
self.upconv2 = double_conv(512, 256, 128)
self.upconv3 = double_conv(256, 128, 64)
self.upconv4 = double_conv(128, 64, 32)
num_class = 2
self.conv_cls = nn.Sequential(
nn.Conv2d(32, 32, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(32, 32, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(32, 16, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(16, 16, kernel_size=1),
nn.ReLU(inplace=True),
nn.Conv2d(16, num_class, kernel_size=1),
)
init_weights(self.upconv1.modules())
init_weights(self.upconv2.modules())
init_weights(self.upconv3.modules())
init_weights(self.upconv4.modules())
init_weights(self.conv_cls.modules())
def forward(self, x):
"""Base network"""
sources = self.basenet(x)
""" U network """
y = torch.cat([sources[0], sources[1]], dim=1)
y = self.upconv1(y)
y = F.interpolate(
y, size=sources[2].size()[2:], mode="bilinear", align_corners=False
)
y = torch.cat([y, sources[2]], dim=1)
y = self.upconv2(y)
y = F.interpolate(
y, size=sources[3].size()[2:], mode="bilinear", align_corners=False
)
y = torch.cat([y, sources[3]], dim=1)
y = self.upconv3(y)
y = F.interpolate(
y, size=sources[4].size()[2:], mode="bilinear", align_corners=False
)
y = torch.cat([y, sources[4]], dim=1)
feature = self.upconv4(y)
y = self.conv_cls(feature)
return y.permute(0, 2, 3, 1), feature
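# Minimal shape-check sketch (editor's addition, not part of the original file).
# The batch size and 256x256 input below are illustrative only; CRAFT should return
# per-pixel region/affinity scores at roughly half the input resolution together with
# the 32-channel feature map produced by the last upconv block.
if __name__ == "__main__":
    net = CRAFT(pretrained=False)
    dummy = torch.randn(1, 3, 256, 256)
    scores, feature = net(dummy)
    print(scores.shape, feature.shape)  # expected: (1, 128, 128, 2) and (1, 32, 128, 128)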
| [
"torch.cat",
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.BatchNorm2d",
"torch.nn.init.xavier_uniform_",
"torch.nn.ReLU",
"torch.nn.Conv2d"
] | 1.9.0 | nhatnxn/layout_GateGCN | 66ead2482fc5148668cfc776e0a3ac03d916e897 |
0.4 | ## TODO: define the convolutional neural network architecture
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
# can use the below import should you choose to initialize the weights of your Net
import torch.nn.init as I
from torchvision import models
from collections import OrderedDict
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
## TODO: Define all the layers of this CNN, the only requirements are:
## 1. This network takes in a square (same width and height), grayscale image as input
## 2. It ends with a linear layer that represents the keypoints
## it's suggested that you make this last layer output 136 values, 2 for each of the 68 keypoint (x, y) pairs
# As an example, you've been given a convolutional layer, which you may (but don't have to) change:
# 1 input image channel (grayscale), 32 output channels/feature maps, 5x5 square convolution kernel
self.conv1 = nn.Conv2d(1, 32, 5)
## Note that among the layers to add, consider including:
# maxpooling layers, multiple conv layers, fully-connected layers, and other layers (such as dropout or batch normalization) to avoid overfitting
def forward(self, x):
## TODO: Define the feedforward behavior of this model
## x is the input image and, as an example, here you may choose to include a pool/conv step:
## x = self.pool(F.relu(self.conv1(x)))
# a modified x, having gone through all the layers of your model, should be returned
return x
class NaimishNet(nn.Module):
def __init__(self, image_size, output_size = 136, kernels = [5,5,5,5],out_channels = [32,64,128,256],
dropout_p = [0, 0, 0, 0, 0, 0], use_padding=True, use_maxp = True):
super(NaimishNet, self).__init__()
# padding only supports odd-numbered kernels in this implementation
self.use_padding = use_padding
# init padding
if self.use_padding:
self.padding = [int((k-1)/2) for k in kernels]
else:
self.padding = [0,0,0,0]
# Find the size of the last maxp output.
last_maxp_size = image_size
for idx, val in enumerate(kernels):
if self.use_padding:
last_maxp_size = last_maxp_size//2
else:
last_maxp_size = (last_maxp_size - (val-1))//2
last_maxp_size = out_channels[3] * last_maxp_size * last_maxp_size
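# worked example (editor's note): with image_size = 96, the default kernels and
# use_padding=True, the spatial size halves at each of the four pooling stages,
# 96 -> 48 -> 24 -> 12 -> 6, so last_maxp_size = 256 * 6 * 6 = 9216 input features
# for the first fully-connected layer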
self.conv1 = nn.Sequential(
OrderedDict([
('conv1', nn.Conv2d(1, out_channels[0], kernel_size=kernels[0], padding=self.padding[0])),
('relu1', nn.ReLU())
])) # (32, 252, 252)
if use_maxp:
self.maxp1 = nn.Sequential(OrderedDict([
('maxp1', nn.MaxPool2d(2, 2)),
('dropout1', nn.Dropout(dropout_p[0])),
('bachnorm1', nn.BatchNorm2d(out_channels[0]))
])) # (32, 126, 126)
else:
self.maxp1 = nn.Sequential(OrderedDict([
('maxp1', nn.AvgPool2d(2, 2)),
('dropout1', nn.Dropout(dropout_p[0])),
('bachnorm1', nn.BatchNorm2d(out_channels[0]))
])) # (32, 126, 126)
self.conv2 = nn.Sequential(OrderedDict([
('conv2', nn.Conv2d(out_channels[0], out_channels[1], kernel_size=kernels[1], padding=self.padding[1])),
('relu2', nn.ReLU())
])) # (64, 122, 122)
if use_maxp:
self.maxp2 = nn.Sequential(OrderedDict([
('maxp2', nn.MaxPool2d(2, 2)),
('dropout2', nn.Dropout(dropout_p[1])),
('bachnorm2', nn.BatchNorm2d(out_channels[1]))
])) # (64, 61, 61)
else:
self.maxp2 = nn.Sequential(OrderedDict([
('maxp2', nn.AvgPool2d(2, 2)),
('dropout2', nn.Dropout(dropout_p[1])),
('bachnorm2', nn.BatchNorm2d(out_channels[1]))
])) # (64, 61, 61)
self.conv3 = nn.Sequential(OrderedDict([
('conv3', nn.Conv2d(out_channels[1], out_channels[2], kernel_size=kernels[2], padding=self.padding[2])),
('relu3', nn.ReLU())
])) # (128, 59, 59)
if use_maxp:
self.maxp3 = nn.Sequential(OrderedDict([
('maxp3', nn.MaxPool2d(2, 2)),
('dropout3', nn.Dropout(dropout_p[2])),
('bachnorm3', nn.BatchNorm2d(out_channels[2]))
])) # (128, 29, 29)
else:
self.maxp3 = nn.Sequential(OrderedDict([
('maxp3', nn.AvgPool2d(2, 2)),
('dropout3', nn.Dropout(dropout_p[2])),
('bachnorm3', nn.BatchNorm2d(out_channels[2]))
])) # (128, 29, 29)
self.conv4 = nn.Sequential(OrderedDict([
('conv4', nn.Conv2d(out_channels[2], out_channels[3], kernel_size=kernels[3], padding=self.padding[3])),
('relu4', nn.ReLU())
])) # (256, 27, 27)
if use_maxp:
self.maxp4 = nn.Sequential(OrderedDict([
('maxp4', nn.MaxPool2d(2, 2)),
('dropout4', nn.Dropout(dropout_p[3])),
('bachnorm4', nn.BatchNorm2d(out_channels[3]))
])) # (256, 13, 13)
else:
self.maxp4 = nn.Sequential(OrderedDict([
('maxp4', nn.AvgPool2d(2, 2)),
('dropout4', nn.Dropout(dropout_p[3])),
('bachnorm4', nn.BatchNorm2d(out_channels[3]))
])) # (256, 13, 13)
self.fc1 = nn.Sequential(OrderedDict([
('fc1', nn.Linear(last_maxp_size, 1024)),
('relu5', nn.ReLU()),
('dropout5', nn.Dropout(dropout_p[4])),
('bachnorm5', nn.BatchNorm1d(1024))
])) # (36864, 1024)
self.fc2 = nn.Sequential(OrderedDict([
('fc2', nn.Linear(1024, 1024)),
('relu6', nn.ReLU()),
('dropout6', nn.Dropout(dropout_p[5])),
('bachnorm6', nn.BatchNorm1d(1024))
])) # (1024, 1024)
self.fc3 = nn.Sequential(OrderedDict([
('fc3', nn.Linear(1024, output_size))
])) # (1024, 136)
def forward(self, x):
out = self.conv1(x)
out = self.maxp1(out)
out = self.conv2(out)
out = self.maxp2(out)
out = self.conv3(out)
out = self.maxp3(out)
out = self.conv4(out)
out = self.maxp4(out)
out = out.view(out.size(0), -1)
out = self.fc1(out)
out = self.fc2(out)
out = self.fc3(out)
return out
def __str__(self):
pretty_net_str = ''
for layer_name in self._modules:
pretty_net_str += f'{layer_name}:\n'
for items in getattr(self, layer_name):
pretty_net_str += f'{items}\n'
pretty_net_str += '\n'
return pretty_net_str
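# Minimal shape-check sketch (editor's addition, not part of the original file).
# The 96x96 grayscale input below is illustrative; with the default kernels,
# out_channels and padding the network should map it to the 136 keypoint values.
if __name__ == "__main__":
    net = NaimishNet(image_size=96)
    net.eval()  # BatchNorm layers behave deterministically in eval mode
    dummy = torch.randn(2, 1, 96, 96)
    print(net(dummy).shape)  # expected: torch.Size([2, 136])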
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.MaxPool2d",
"torch.nn.AvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.BatchNorm1d"
] | 0.4.0 | rishabhchhillar/Facial-Keypoints-Detection | c8fb7db7b4743a753e9234e6236bcdd6b0f6de77 |
1.0 | # -*- coding: utf-8 -*-
# file: sentiment_classifier.py
# author: yangheng <[email protected]>
# Copyright (C) 2020. All Rights Reserved.
import json
import os
import pickle
import random
import numpy
import torch
from findfile import find_file
from termcolor import colored
from torch.utils.data import DataLoader
from transformers import AutoTokenizer, AutoModel
from pyabsa.core.apc.classic.__glove__.dataset_utils.data_utils_for_training import build_embedding_matrix, build_tokenizer
from pyabsa.core.apc.models.ensembler import APCEnsembler
from pyabsa.utils.pyabsa_utils import print_args, TransformerConnectionError
from pyabsa.functional.dataset import detect_infer_dataset
from pyabsa.core.apc.models import (APCModelList,
GloVeAPCModelList,
BERTBaselineAPCModelList
)
from pyabsa.core.apc.classic.__bert__.dataset_utils.data_utils_for_inferring import BERTBaselineABSADataset
from pyabsa.core.apc.classic.__glove__.dataset_utils.data_utils_for_inferring import GloVeABSADataset
from pyabsa.core.apc.dataset_utils.apc_utils import LABEL_PADDING
from pyabsa.core.apc.dataset_utils.data_utils_for_inferring import ABSADataset
class SentimentClassifier:
def __init__(self, model_arg=None, sentiment_map=None, eval_batch_size=128):
'''
model_arg: either the (model, opt, tokenizer) triple returned by training, or a path to a saved checkpoint to load the classifier from
'''
self.initializers = {
'xavier_uniform_': torch.nn.init.xavier_uniform_,
'xavier_normal_': torch.nn.init.xavier_normal_,
'orthogonal_': torch.nn.init.orthogonal_
}
# load from a training
if not isinstance(model_arg, str):
print('Load sentiment classifier from training')
self.model = model_arg[0]
self.opt = model_arg[1]
self.tokenizer = model_arg[2]
else:
# load from a model path
try:
if 'fine-tuned' in model_arg:
raise ValueError('Do not support to directly load a fine-tuned model, please load a .state_dict or .model instead!')
print('Load sentiment classifier from', model_arg)
state_dict_path = find_file(model_arg, '.state_dict', exclude_key=['__MACOSX'])
model_path = find_file(model_arg, '.model', exclude_key=['__MACOSX'])
tokenizer_path = find_file(model_arg, '.tokenizer', exclude_key=['__MACOSX'])
config_path = find_file(model_arg, '.config', exclude_key=['__MACOSX'])
print('config: {}'.format(config_path))
print('state_dict: {}'.format(state_dict_path))
print('model: {}'.format(model_path))
print('tokenizer: {}'.format(tokenizer_path))
self.opt = pickle.load(open(config_path, mode='rb'))
self.opt.eval_batch_size = eval_batch_size
if state_dict_path:
if not hasattr(GloVeAPCModelList, self.opt.model.__name__.upper()):
if state_dict_path:
self.model = APCEnsembler(self.opt, load_dataset=False)
self.model.load_state_dict(torch.load(state_dict_path, map_location='cpu'))
if model_path:
self.model = torch.load(model_path, map_location='cpu')
else:
tokenizer = build_tokenizer(
dataset_list=self.opt.dataset_file,
max_seq_len=self.opt.max_seq_len,
dat_fname='{0}_tokenizer.dat'.format(os.path.basename(self.opt.dataset_name)),
opt=self.opt
)
embedding_matrix = build_embedding_matrix(
word2idx=tokenizer.word2idx,
embed_dim=self.opt.embed_dim,
dat_fname='{0}_{1}_embedding_matrix.dat'.format(str(self.opt.embed_dim), os.path.basename(self.opt.dataset_name)),
opt=self.opt
)
self.model = self.opt.model(embedding_matrix, self.opt).to(self.opt.device)
self.model.load_state_dict(torch.load(state_dict_path, map_location='cpu'))
if model_path:
self.model = torch.load(model_path, map_location='cpu')
if tokenizer_path:
self.tokenizer = pickle.load(open(tokenizer_path, mode='rb'))
else:
try:
self.tokenizer = AutoTokenizer.from_pretrained(self.opt.pretrained_bert, do_lower_case='uncased' in self.opt.pretrained_bert)
except ValueError:
raise TransformerConnectionError()
print('Config used in Training:')
print_args(self.opt, mode=1)
except Exception as e:
raise RuntimeError('Exception: {}. Failed to load the model from {}!'.format(e, model_arg))
if isinstance(self.opt.model, list):
if hasattr(APCModelList, self.opt.model[0].__name__):
self.dataset = ABSADataset(tokenizer=self.tokenizer, opt=self.opt)
elif hasattr(BERTBaselineAPCModelList, self.opt.model[0].__name__):
self.dataset = BERTBaselineABSADataset(tokenizer=self.tokenizer, opt=self.opt)
elif hasattr(GloVeAPCModelList, self.opt.model[0].__name__):
self.dataset = GloVeABSADataset(tokenizer=self.tokenizer, opt=self.opt)
else:
raise KeyError('The ref_checkpoint you are loading is not from an APC model.')
else:
if hasattr(APCModelList, self.opt.model.__name__):
self.dataset = ABSADataset(tokenizer=self.tokenizer, opt=self.opt)
elif hasattr(BERTBaselineAPCModelList, self.opt.model.__name__):
self.dataset = BERTBaselineABSADataset(tokenizer=self.tokenizer, opt=self.opt)
elif hasattr(GloVeAPCModelList, self.opt.model.__name__):
self.dataset = GloVeABSADataset(tokenizer=self.tokenizer, opt=self.opt)
else:
raise KeyError('The ref_checkpoint you are loading is not from an APC model.')
self.infer_dataloader = None
if self.opt.seed is not None:
random.seed(self.opt.seed)
numpy.random.seed(self.opt.seed)
torch.manual_seed(self.opt.seed)
torch.cuda.manual_seed(self.opt.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
self.opt.initializer = self.opt.initializer
self.sentiment_map = None
self.set_sentiment_map(sentiment_map)
def set_sentiment_map(self, sentiment_map):
if sentiment_map:
print(colored('Warning: set_sentiment_map() is deprecated, please directly set labels within dataset.', 'red'))
sentiment_map[LABEL_PADDING] = ''
self.sentiment_map = sentiment_map
def to(self, device=None):
self.opt.device = device
self.model.to(device)
def cpu(self):
self.opt.device = 'cpu'
self.model.to('cpu')
def cuda(self, device='cuda:0'):
self.opt.device = device
self.model.to(device)
def batch_infer(self,
target_file=None,
print_result=True,
save_result=False,
clear_input_samples=True,
ignore_error=True):
if clear_input_samples:
self.clear_input_samples()
save_path = os.path.join(os.getcwd(), 'apc_inference.result.json')
target_file = detect_infer_dataset(target_file, task='apc')
if not target_file:
raise FileNotFoundError('Can not find inference datasets!')
self.dataset.prepare_infer_dataset(target_file, ignore_error=ignore_error)
self.infer_dataloader = DataLoader(dataset=self.dataset, batch_size=self.opt.eval_batch_size, pin_memory=True, shuffle=False)
return self._infer(save_path=save_path if save_result else None, print_result=print_result)
def infer(self, text: str = None,
print_result=True,
clear_input_samples=True):
if clear_input_samples:
self.clear_input_samples()
if text:
self.dataset.prepare_infer_sample(text)
else:
raise RuntimeError('Please specify your datasets path!')
self.infer_dataloader = DataLoader(dataset=self.dataset, batch_size=self.opt.eval_batch_size, shuffle=False)
return self._infer(print_result=print_result)
def merge_results(self, results):
""" merge APC results have the same input text
"""
final_res = []
for result in results:
if final_res and "".join(final_res[-1]['text'].split()) == "".join(result['text'].split()):
final_res[-1]['aspect'].append(result['aspect'])
final_res[-1]['sentiment'].append(result['sentiment'])
final_res[-1]['ref_sentiment'].append(result['ref_sentiment'])
final_res[-1]['ref_check'].append(result['ref_check'])
else:
final_res.append(
{
'text': result['text'].replace('  ', ' '),
'aspect': [result['aspect']],
'sentiment': [result['sentiment']],
'ref_sentiment': [result['ref_sentiment']],
'ref_check': [result['ref_check']]
}
)
return final_res
def _infer(self, save_path=None, print_result=True):
_params = filter(lambda p: p.requires_grad, self.model.parameters())
correct = {True: 'Correct', False: 'Wrong'}
results = []
with torch.no_grad():
self.model.eval()
n_correct = 0
n_labeled = 0
n_total = 0
for _, sample in enumerate(self.infer_dataloader):
inputs = {col: sample[col].to(self.opt.device) for col in self.opt.inputs_cols if col != 'polarity'}
self.model.eval()
outputs = self.model(inputs)
sen_logits = outputs['logits']
t_probs = torch.softmax(sen_logits, dim=-1).cpu().numpy()
for i, i_probs in enumerate(t_probs):
if 'index_to_label' in self.opt.args and int(i_probs.argmax(axis=-1)) in self.opt.index_to_label:
sent = self.opt.index_to_label[int(i_probs.argmax(axis=-1))]
real_sent = sample['polarity'][i] if isinstance(sample['polarity'][i], str) else self.opt.index_to_label[int(sample['polarity'][i])]
if real_sent != -999 and real_sent != '-999':
n_labeled += 1
if sent == real_sent:
n_correct += 1
else: # for the former versions before 1.2.0
sent = int(i_probs.argmax(axis=-1))
real_sent = int(sample['polarity'][i])
aspect = sample['aspect'][i]
text_raw = sample['text_raw'][i]
results.append({
'text': text_raw,
'aspect': aspect,
'sentiment': sent,
'ref_sentiment': real_sent,
'ref_check': correct[sent == real_sent] if real_sent != '-999' else '',
})
n_total += 1
results = self.merge_results(results)
try:
if print_result:
for result in results:
text_printing = result['text']
for i in range(len(result['aspect'])):
if result['ref_sentiment'][i] != -999:
if result['sentiment'][i] == result['ref_sentiment'][i]:
aspect_info = colored('{} -> {}(ref:{})'.format(result['aspect'][i], result['sentiment'][i], result['ref_sentiment'][i]), 'green')
else:
aspect_info = colored('{} -> {}(ref:{})'.format(result['aspect'][i], result['sentiment'][i], result['ref_sentiment'][i]), 'red')
else:
aspect_info = '{} -> {}'.format(result['aspect'][i], result['sentiment'][i])
text_printing = text_printing.replace(result['aspect'][i], aspect_info)
print(text_printing)
if save_path:
fout = open(save_path, 'w', encoding='utf8')
json.dump(json.JSONEncoder().encode({'results': results}), fout, ensure_ascii=False)
# fout.write('Total samples:{}\n'.format(n_total))
# fout.write('Labeled samples:{}\n'.format(n_labeled))
# fout.write('Prediction Accuracy:{}%\n'.format(100 * n_correct / n_labeled)) if n_labeled else 'N.A.'
print('inference result saved in: {}'.format(save_path))
except Exception as e:
print('Can not save result: {}, Exception: {}'.format(text_raw, e))
if len(self.infer_dataloader) > 1:
print('Total samples:{}'.format(n_total))
print('Labeled samples:{}'.format(n_labeled))
print('Prediction Accuracy:{}%'.format(100 * n_correct / n_labeled if n_labeled else 'N.A.'))
return results
def clear_input_samples(self):
self.dataset.all_data = []
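# Usage sketch (editor's addition, not part of the original file; the checkpoint path,
# inference dataset path and example text below are placeholders, not real resources):
# classifier = SentimentClassifier('path/to/saved/checkpoint')
# classifier.to('cuda:0') # or classifier.cpu()
# classifier.infer('a sentence annotated in the expected inference format', print_result=True)
# classifier.batch_infer(target_file='path/to/inference/dataset', save_result=True)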
| [
"torch.cuda.manual_seed",
"torch.no_grad",
"torch.softmax",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"torch.load"
] | 1.0 | WeiLi9811/PyABSA | e1595784b8c978c1e91c0d8139a0a4dc36ac5965 |
1.1 | from functools import reduce
from itertools import permutations
from typing import Dict
from typing import Optional
from typing import Tuple
import logging
import torch
from torch_complex.tensor import ComplexTensor
from typeguard import check_argument_types
from espnet2.enh.abs_enh import AbsEnhancement
from espnet2.torch_utils.device_funcs import force_gatherable
from espnet2.train.abs_espnet_model import AbsESPnetModel
"""
se model:
input : waveform
output: magnitude
"""
class ESPnetEnhancementModel(AbsESPnetModel):
"""Speech enhancement or separation Frontend model"""
def __init__(
self,
enh_model: Optional[AbsEnhancement],
):
assert check_argument_types()
super().__init__()
self.enh_model = enh_model
self.num_spk = enh_model.num_spk
self.num_noise_type = getattr(self.enh_model, "num_noise_type", 1)
# get mask type for TF-domain models
self.mask_type = getattr(self.enh_model, "mask_type", None)
# get loss type for model training
self.loss_type = getattr(self.enh_model, "loss_type", None)
assert self.loss_type in (
# mse_loss(predicted_mask, target_label)
"mask_mse",
# mse_loss(enhanced_magnitude_spectrum, target_magnitude_spectrum)
"magnitude",
"magnitude3",
# mse_loss(enhanced_complex_spectrum, target_complex_spectrum)
"spectrum",
# si_snr(enhanced_waveform, target_waveform)
"si_snr",
), self.loss_type
# for multi-channel signal
self.ref_channel = getattr(self.enh_model, "ref_channel", -1)
def _create_mask_label(self, mix_spec, ref_spec, mask_type="IAM"):
"""Create mask label.
:param mix_spec: ComplexTensor(B, T, F)
:param ref_spec: [ComplexTensor(B, T, F), ...] or ComplexTensor(B, T, F)
:return: [Tensor(B, T, F), ...] or [ComplexTensor(B, T, F), ...]
"""
assert mask_type in [
"IBM",
"IRM",
"IAM",
"PSM",
"NPSM",
"PSM^2",
], f"mask type {mask_type} not supported"
eps = 10e-8
mask_label = []
for r in ref_spec:
mask = None
if mask_type == "IBM":
flags = [abs(r) >= abs(n) for n in ref_spec]
mask = reduce(lambda x, y: x * y, flags)
mask = mask.int()
elif mask_type == "IRM":
# TODO(Wangyou): need to fix this,
# as noise references are provided separately
mask = abs(r) / (sum(([abs(n) for n in ref_spec])) + eps)
elif mask_type == "IAM":
mask = abs(r) / (abs(mix_spec) + eps)
mask = mask.clamp(min=0, max=1)
elif mask_type == "PSM" or mask_type == "NPSM":
phase_r = r / (abs(r) + eps)
phase_mix = mix_spec / (abs(mix_spec) + eps)
# cos(a - b) = cos(a)*cos(b) + sin(a)*sin(b)
cos_theta = (
phase_r.real * phase_mix.real + phase_r.imag * phase_mix.imag
)
mask = (abs(r) / (abs(mix_spec) + eps)) * cos_theta
mask = (
mask.clamp(min=0, max=1)
if mask_label == "NPSM"
else mask.clamp(min=-1, max=1)
)
elif mask_type == "PSM^2":
# This is for training beamforming masks
phase_r = r / (abs(r) + eps)
phase_mix = mix_spec / (abs(mix_spec) + eps)
# cos(a - b) = cos(a)*cos(b) + sin(a)*sin(b)
cos_theta = (
phase_r.real * phase_mix.real + phase_r.imag * phase_mix.imag
)
mask = (abs(r).pow(2) / (abs(mix_spec).pow(2) + eps)) * cos_theta
mask = mask.clamp(min=-1, max=1)
assert mask is not None, f"mask type {mask_type} not supported"
mask_label.append(mask)
return mask_label
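# worked example (editor's note, IAM case): if a TF bin of the mixture has magnitude 2.0
# and the target reference has magnitude 1.0, the ideal amplitude mask is 1.0 / (2.0 + eps),
# roughly 0.5, clamped to [0, 1]; multiplying the mixture magnitude by it recovers the target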
def forward(
self,
speech_mix: torch.Tensor,
speech_mix_lengths: torch.Tensor = None,
**kwargs,
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
"""Frontend + Encoder + Decoder + Calc loss
Args:
speech_mix: (Batch, samples) or (Batch, samples, channels)
speech_ref: (Batch, num_speaker, samples)
or (Batch, num_speaker, samples, channels)
speech_mix_lengths: (Batch,), default None for the chunk iterator,
because the chunk-iterator does not have the
speech_lengths returned. see in
espnet2/iterators/chunk_iter_factory.py
"""
# clean speech signal of each speaker
speech_ref = [
kwargs["speech_ref{}".format(spk + 1)] for spk in range(self.num_spk)
]
# (Batch, num_speaker, samples) or (Batch, num_speaker, samples, channels)
speech_ref = torch.stack(speech_ref, dim=1)
batch_size = speech_mix.shape[0]
speech_lengths = (
speech_mix_lengths
if speech_mix_lengths is not None
else torch.ones(batch_size).int() * speech_mix.shape[1]
)
assert speech_lengths.dim() == 1, speech_lengths.shape
# Check that batch_size is unified
assert speech_mix.shape[0] == speech_ref.shape[0] == speech_lengths.shape[0], (
speech_mix.shape,
speech_ref.shape,
speech_lengths.shape,
)
batch_size = speech_mix.shape[0]
# for data-parallel
speech_ref = speech_ref[:, :, : speech_lengths.max()]
speech_mix = speech_mix[:, : speech_lengths.max()]
        # predict magnitude (a list) and masks
        logging.info(f"in the espnet_model1, forward function, speech_lengths is {speech_lengths} its shape is {speech_lengths.shape}")
predicted_magnitude, tf_length, mask_pre = self.enh_model(
speech_mix, speech_lengths
)
logging.info(f"predicted_magnitude is {predicted_magnitude} its shape is {predicted_magnitude[0].shape}")
# prepared ref magnitude, wave -> stft -> abs -> magnitude
speech_ref = speech_ref.squeeze(1) # (B,1,samples) -> (B, samples)
logging.info(f"in the espnet_model1, speech_ref is {speech_ref} its shape is {speech_ref.shape}")
        input_spectrum, flens = self.enh_model.stft(speech_ref, speech_lengths)  # TODO: verify that speech_lengths is correct here
        input_spectrum = ComplexTensor(input_spectrum[..., 0], input_spectrum[..., 1])
        magnitude_ref = abs(input_spectrum)
        logging.info(f"in the espnet_model1, magnitude_ref is {magnitude_ref} its shape is {magnitude_ref.shape}")
if self.loss_type == "magnitude3":
# compute loss on magnitude spectrum
# magnitude_ref is B x T x F
# magnitude_pre[0] is B x T x F
logging.info(f"in espnet_model1,using self.loss_type is {self.loss_type }, magnitude_ref shape is {magnitude_ref[0].shape}")
logging.info(f"in espnet_model1,, predicted_magnitude[0] shape is {predicted_magnitude[0].shape}")
tf_loss, perm = self._permutation_loss3(
magnitude_ref, predicted_magnitude[0], tf_length,
)
        else:
            # Only "magnitude3" is implemented in this forward(); fail loudly instead of
            # hitting a NameError on tf_loss for any other loss_type.
            raise NotImplementedError(f"loss_type {self.loss_type} is not supported in this model")
        loss = tf_loss
stats = dict(
loss=loss.detach(),
)
# force_gatherable: to-device and to-tensor if scalar for DataParallel
loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
logging.info(f"final loss is {loss}, stats is {stats}, weight is {weight}")
return loss, stats, weight
@staticmethod
def tf_mse_loss(ref, inf):
"""time-frequency MSE loss.
:param ref: (Batch, T, F)
:param inf: (Batch, T, F)
:return: (Batch)
"""
assert ref.dim() == inf.dim(), (ref.shape, inf.shape)
if ref.dim() == 3:
mseloss = (abs(ref - inf) ** 2).mean(dim=[1, 2])
elif ref.dim() == 4:
mseloss = (abs(ref - inf) ** 2).mean(dim=[1, 2, 3])
else:
raise ValueError("Invalid input shape: ref={}, inf={}".format(ref, inf))
return mseloss
@staticmethod
def tf_l1_loss(ref, inf):
"""time-frequency L1 loss.
:param ref: (Batch, T, F) or (Batch, T, C, F)
:param inf: (Batch, T, F) or (Batch, T, C, F)
:return: (Batch)
"""
assert ref.dim() == inf.dim(), (ref.shape, inf.shape)
if ref.dim() == 3:
l1loss = abs(ref - inf).mean(dim=[1, 2])
elif ref.dim() == 4:
l1loss = abs(ref - inf).mean(dim=[1, 2, 3])
else:
raise ValueError("Invalid input shape: ref={}, inf={}".format(ref, inf))
return l1loss
@staticmethod
def si_snr_loss(ref, inf):
"""si-snr loss
:param ref: (Batch, samples)
:param inf: (Batch, samples)
:return: (Batch)
"""
ref = ref / torch.norm(ref, p=2, dim=1, keepdim=True)
inf = inf / torch.norm(inf, p=2, dim=1, keepdim=True)
s_target = (ref * inf).sum(dim=1, keepdims=True) * ref
e_noise = inf - s_target
si_snr = 20 * torch.log10(
torch.norm(s_target, p=2, dim=1) / torch.norm(e_noise, p=2, dim=1)
)
return -si_snr
@staticmethod
def si_snr_loss_zeromean(ref, inf):
"""si_snr loss with zero-mean in pre-processing.
:param ref: (Batch, samples)
:param inf: (Batch, samples)
:return: (Batch)
"""
eps = 1e-8
assert ref.size() == inf.size()
B, T = ref.size()
# mask padding position along T
# Step 1. Zero-mean norm
mean_target = torch.sum(ref, dim=1, keepdim=True) / T
mean_estimate = torch.sum(inf, dim=1, keepdim=True) / T
zero_mean_target = ref - mean_target
zero_mean_estimate = inf - mean_estimate
# Step 2. SI-SNR with order
# reshape to use broadcast
s_target = zero_mean_target # [B, T]
s_estimate = zero_mean_estimate # [B, T]
# s_target = <s', s>s / ||s||^2
pair_wise_dot = torch.sum(s_estimate * s_target, dim=1, keepdim=True) # [B, 1]
s_target_energy = torch.sum(s_target ** 2, dim=1, keepdim=True) + eps # [B, 1]
pair_wise_proj = pair_wise_dot * s_target / s_target_energy # [B, T]
# e_noise = s' - s_target
e_noise = s_estimate - pair_wise_proj # [B, T]
# SI-SNR = 10 * log_10(||s_target||^2 / ||e_noise||^2)
pair_wise_si_snr = torch.sum(pair_wise_proj ** 2, dim=1) / (
torch.sum(e_noise ** 2, dim=1) + eps
)
# print('pair_si_snr',pair_wise_si_snr[0,:])
pair_wise_si_snr = 10 * torch.log10(pair_wise_si_snr + eps) # [B]
# print(pair_wise_si_snr)
return -1 * pair_wise_si_snr
@staticmethod
def _permutation_loss(ref, inf, criterion, perm=None):
"""The basic permutation loss function.
Args:
ref (List[torch.Tensor]): [(batch, ...), ...]
inf (List[torch.Tensor]): [(batch, ...), ...]
criterion (function): Loss function
perm: (batch)
Returns:
torch.Tensor: (batch)
"""
num_spk = len(ref)
def pair_loss(permutation):
return sum(
[criterion(ref[s], inf[t]) for s, t in enumerate(permutation)]
) / len(permutation)
losses = torch.stack(
[pair_loss(p) for p in permutations(range(num_spk))], dim=1
)
if perm is None:
loss, perm = torch.min(losses, dim=1)
else:
loss = losses[torch.arange(losses.shape[0]), perm]
return loss.mean(), perm
@staticmethod
def _permutation_loss3(ref, inf, magnitude_lengths, perm=None):
logging.info(f"in _permutation_loss3, ref shape {ref.shape} and inf shape is {inf.shape}")
logging.info(f"in _permutation_loss3, magnitude_lengths is {magnitude_lengths}")
input_size = magnitude_lengths
def loss():
loss_for_permute = []
#logging.info(f"masks_[0] type is {type(masks_[0])}")
#logging.info(f"ref[0] type is {type(ref[0])}")
# N X T X F
inf_magnitude = inf
logging.info(f"in _permutation_loss3,inf_magnitude shape is {inf_magnitude.shape}")
# N X T X F
ref_magnitude = ref
logging.info(f"in _permutation_loss3,ref_magnitude shape is {ref_magnitude.shape}")
# N X T X F
mse = torch.pow(inf_magnitude - ref_magnitude, 2)
            # N x T (sum over the frequency axis)
            mse_sum1 = torch.sum(mse, -1)
            # N (sum over the time axis)
            utt_loss = torch.sum(mse_sum1, -1)
# utt_loss = torch.sum(torch.sum(torch.pow(masks_[int(0)]*inf - ref[int(0)], 2), -1), -1)
loss_for_permute.append(utt_loss)
logging.info(f"input_size device is {input_size.device}")
logging.info(f"")
input_size_ = torch.tensor(input_size, dtype=torch.float32, device=inf_magnitude.device)
logging.info(f"input_size device again is {input_size.device}")
loss_perutt = sum(loss_for_permute) / input_size_
return loss_perutt
#logging.info(f"num_utts is {ref[0].shape[0]}")
num_utts = ref.shape[0] # batch size
logging.info(f"in _permutation_loss3,num_utts is {num_utts}")
# O(N!), could be optimized
# 1 x N
pscore = torch.stack([loss()], dim=0)
# pscore = torch.stack([loss(p) for p in permutations(range(num_spk))], dim=1)
logging.info(f"pscore is {pscore}")
# N
num_spk=1
min_perutt, _ = torch.min(pscore, dim=0)
loss = torch.sum(min_perutt) / (num_spk * num_utts)
"""
the loss sum freq and sum time ,then average on the time axis, then average on the number of utterances
"""
logging.info(f"loss is {loss}")
return loss , perm
def collect_feats(
self, speech_mix: torch.Tensor, speech_mix_lengths: torch.Tensor, **kwargs
) -> Dict[str, torch.Tensor]:
# for data-parallel
speech_mix = speech_mix[:, : speech_mix_lengths.max()]
feats, feats_lengths = speech_mix, speech_mix_lengths
return {"feats": feats, "feats_lengths": feats_lengths}
| [
"torch.stack",
"torch.min",
"torch.arange",
"torch.norm",
"torch.pow",
"torch.log10",
"torch.ones",
"torch.tensor",
"torch.sum"
] | 1.1.0 | ishine/DPSL-ASR | fabb66cbd7f24f7a05c64f5b7e87af154f39ceb2 |
1.6 | #!/usr/bin/python
#-*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy, math, pdb, sys, random
import time, os, itertools, shutil, importlib
from tuneThreshold import tuneThresholdfromScore
from DatasetLoader import test_dataset_loader
from torch.cuda.amp import autocast, GradScaler
class WrappedModel(nn.Module):
## The purpose of this wrapper is to make the model structure consistent between single and multi-GPU
def __init__(self, model):
super(WrappedModel, self).__init__()
self.module = model
def forward(self, x, label=None):
return self.module(x, label)
class SpeakerNet(nn.Module):
def __init__(self, model, optimizer, trainfunc, nPerSpeaker, **kwargs):
super(SpeakerNet, self).__init__();
SpeakerNetModel = importlib.import_module('models.'+model).__getattribute__('MainModel')
self.__S__ = SpeakerNetModel(**kwargs);
LossFunction = importlib.import_module('loss.'+trainfunc).__getattribute__('LossFunction')
self.__L__ = LossFunction(**kwargs);
self.nPerSpeaker = nPerSpeaker
def forward(self, data, label=None):
data = data.reshape(-1,data.size()[-1]).cuda()
outp = self.__S__.forward(data)
        if label is None:
return outp
else:
outp = outp.reshape(self.nPerSpeaker,-1,outp.size()[-1]).transpose(1,0).squeeze(1)
nloss, prec1 = self.__L__.forward(outp,label)
return nloss, prec1
class SpeakerNetCPU(SpeakerNet):
def __init__(self, model, optimizer, trainfunc, nPerSpeaker, **kwargs):
super(SpeakerNet, self).__init__();
SpeakerNetModel = importlib.import_module('models.'+model).__getattribute__('MainModel')
self.__S__ = SpeakerNetModel(**kwargs);
LossFunction = importlib.import_module('loss.'+trainfunc).__getattribute__('LossFunction')
self.__L__ = LossFunction(**kwargs);
self.nPerSpeaker = nPerSpeaker
def forward(self, data, label=None):
data = data.reshape(-1,data.size()[-1])
outp = self.__S__.forward(data)
        if label is None:
return outp
else:
outp = outp.reshape(self.nPerSpeaker,-1,outp.size()[-1]).transpose(1,0).squeeze(1)
nloss, prec1 = self.__L__.forward(outp,label)
return nloss, prec1
class ModelTrainer(object):
def __init__(self, speaker_model, optimizer, scheduler, gpu, mixedprec, **kwargs):
self.__model__ = speaker_model
Optimizer = importlib.import_module('optimizer.'+optimizer).__getattribute__('Optimizer')
self.__optimizer__ = Optimizer(self.__model__.parameters(), **kwargs)
Scheduler = importlib.import_module('scheduler.'+scheduler).__getattribute__('Scheduler')
self.__scheduler__, self.lr_step = Scheduler(self.__optimizer__, **kwargs)
self.scaler = GradScaler()
self.gpu = gpu
self.mixedprec = mixedprec
assert self.lr_step in ['epoch', 'iteration']
# ## ===== ===== ===== ===== ===== ===== ===== =====
# ## Train network
# ## ===== ===== ===== ===== ===== ===== ===== =====
def train_network(self, loader, verbose):
self.__model__.train();
stepsize = loader.batch_size;
counter = 0;
index = 0;
loss = 0;
top1 = 0 # EER or accuracy
tstart = time.time()
for data, data_label in loader:
data = data.transpose(1,0)
self.__model__.zero_grad();
label = torch.LongTensor(data_label).cuda()
if self.mixedprec:
with autocast():
nloss, prec1 = self.__model__(data, label)
self.scaler.scale(nloss).backward();
self.scaler.step(self.__optimizer__);
self.scaler.update();
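                # With mixed precision, the loss is scaled before backward() to avoid
                # fp16 gradient underflow; scaler.step() unscales the gradients and skips
                # the optimizer update if inf/NaN values are found, and update() then
                # adjusts the scale factor for the next iteration.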
else:
nloss, prec1 = self.__model__(data, label)
nloss.backward();
self.__optimizer__.step();
loss += nloss.detach().cpu();
top1 += prec1.detach().cpu()
counter += 1;
index += stepsize;
telapsed = time.time() - tstart
tstart = time.time()
if verbose:
sys.stdout.write("\rProcessing (%d) "%(index));
sys.stdout.write("Loss %f TEER/TAcc %2.3f%% - %.2f Hz "%(loss/counter, top1/counter, stepsize/telapsed));
sys.stdout.flush();
if self.lr_step == 'iteration': self.__scheduler__.step()
if self.lr_step == 'epoch': self.__scheduler__.step()
sys.stdout.write("\n");
return (loss/counter, top1/counter);
## ===== ===== ===== ===== ===== ===== ===== =====
## Evaluate from list
## ===== ===== ===== ===== ===== ===== ===== =====
def evaluateFromList(self, test_list, test_path, nDataLoaderThread, print_interval=100, num_eval=10, **kwargs):
self.__model__.eval();
lines = []
files = []
feats = {}
tstart = time.time()
## Read all lines
with open(test_list) as f:
lines = f.readlines()
## Get a list of unique file names
files = sum([x.strip().split()[-2:] for x in lines],[])
setfiles = list(set(files))
setfiles.sort()
## Define test data loader
test_dataset = test_dataset_loader(setfiles, test_path, num_eval=num_eval, **kwargs)
test_loader = torch.utils.data.DataLoader(
test_dataset,
batch_size=1,
shuffle=False,
num_workers=nDataLoaderThread,
drop_last=False,
)
## Extract features for every image
for idx, data in enumerate(test_loader):
inp1 = data[0][0].cuda()
ref_feat = self.__model__(inp1).detach().cpu()
feats[data[1][0]] = ref_feat
telapsed = time.time() - tstart
if idx % print_interval == 0:
sys.stdout.write("\rReading %d of %d: %.2f Hz, embedding size %d"%(idx,len(setfiles),idx/telapsed,ref_feat.size()[1]));
print('')
all_scores = [];
all_labels = [];
all_trials = [];
tstart = time.time()
## Read files and compute all scores
for idx, line in enumerate(lines):
data = line.split();
## Append random label if missing
if len(data) == 2: data = [random.randint(0,1)] + data
ref_feat = feats[data[1]].cuda()
com_feat = feats[data[2]].cuda()
if self.__model__.module.__L__.test_normalize:
ref_feat = F.normalize(ref_feat, p=2, dim=1)
com_feat = F.normalize(com_feat, p=2, dim=1)
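            # Each utterance yields num_eval embeddings; the trial score is the negative
            # mean of all pairwise distances between the two embedding sets, so a larger
            # score means the two utterances are more likely from the same speaker.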
dist = F.pairwise_distance(ref_feat.unsqueeze(-1), com_feat.unsqueeze(-1).transpose(0,2)).detach().cpu().numpy();
score = -1 * numpy.mean(dist);
all_scores.append(score);
all_labels.append(int(data[0]));
all_trials.append(data[1]+" "+data[2])
if idx % print_interval == 0:
telapsed = time.time() - tstart
sys.stdout.write("\rComputing %d of %d: %.2f Hz"%(idx,len(lines),idx/telapsed));
sys.stdout.flush();
print('')
return (all_scores, all_labels, all_trials);
## ===== ===== ===== ===== ===== ===== ===== =====
## Save parameters
## ===== ===== ===== ===== ===== ===== ===== =====
def saveParameters(self, path):
torch.save(self.__model__.module.state_dict(), path);
## ===== ===== ===== ===== ===== ===== ===== =====
## Load parameters
## ===== ===== ===== ===== ===== ===== ===== =====
def loadParameters(self, path):
self_state = self.__model__.module.state_dict();
loaded_state = torch.load(path, map_location="cuda:%d"%self.gpu);
for name, param in loaded_state.items():
origname = name;
if name not in self_state:
name = name.replace("module.", "");
if name not in self_state:
print("%s is not in the model."%origname);
continue;
if self_state[name].size() != loaded_state[origname].size():
print("Wrong parameter length: %s, model: %s, loaded: %s"%(origname, self_state[name].size(), loaded_state[origname].size()));
continue;
self_state[name].copy_(param); | [
"torch.nn.functional.normalize",
"torch.cuda.amp.autocast",
"torch.LongTensor",
"torch.utils.data.DataLoader",
"torch.load",
"torch.cuda.amp.GradScaler"
] | 1.6.0 | llongquoc/voxceleb_trainer | 7442ea6c1a4b4fde3bcec44a4538d6a515e5f292 |
1.6 | from machin.auto.config import (
generate_algorithm_config,
generate_env_config,
generate_training_config,
launch,
)
import torch as t
import torch.nn as nn
class SomeQNet(nn.Module):
def __init__(self, state_dim, action_num):
super().__init__()
self.fc1 = nn.Linear(state_dim, 16)
self.fc2 = nn.Linear(16, 16)
self.fc3 = nn.Linear(16, action_num)
def forward(self, state):
a = t.relu(self.fc1(state))
a = t.relu(self.fc2(a))
return self.fc3(a)
if __name__ == "__main__":
config = generate_algorithm_config("DQN")
config = generate_env_config("openai_gym", config)
config = generate_training_config(
root_dir="trial", episode_per_epoch=10, max_episodes=10000, config=config
)
config["frame_config"]["models"] = ["SomeQNet", "SomeQNet"]
config["frame_config"]["model_kwargs"] = [{"state_dim": 4, "action_num": 2}] * 2
launch(config)
| [
"torch.nn.Linear"
] | 1.6.0 | lorenzosteccanella/machin | 9d3ce87dbed820b5019211b0690b54613084d9e4 |
1.5 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Backbone modules.
"""
from collections import OrderedDict
import torch
import torch.nn.functional as F
import torchvision
from torch import nn
from torchvision.models._utils import IntermediateLayerGetter
from typing import Dict, List
from util.misc import NestedTensor, is_main_process
from .position_encoding import build_position_encoding
from timm import create_model
class FrozenBatchNorm2d(torch.nn.Module):
"""
BatchNorm2d where the batch statistics and the affine parameters are fixed.
    Copy-paste from torchvision.misc.ops with added eps before rsqrt,
without which any other models than torchvision.models.resnet[18,34,50,101]
produce nans.
"""
def __init__(self, n):
super(FrozenBatchNorm2d, self).__init__()
self.register_buffer("weight", torch.ones(n))
self.register_buffer("bias", torch.zeros(n))
self.register_buffer("running_mean", torch.zeros(n))
self.register_buffer("running_var", torch.ones(n))
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
num_batches_tracked_key = prefix + 'num_batches_tracked'
if num_batches_tracked_key in state_dict:
del state_dict[num_batches_tracked_key]
super(FrozenBatchNorm2d, self)._load_from_state_dict(
state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs)
def forward(self, x):
# move reshapes to the beginning
# to make it fuser-friendly
w = self.weight.reshape(1, -1, 1, 1)
b = self.bias.reshape(1, -1, 1, 1)
rv = self.running_var.reshape(1, -1, 1, 1)
rm = self.running_mean.reshape(1, -1, 1, 1)
eps = 1e-5
scale = w * (rv + eps).rsqrt()
bias = b - rm * scale
return x * scale + bias
class BackboneBase(nn.Module):
def __init__(self, backbone: nn.Module, train_backbone: bool, num_channels: int, return_interm_layers: bool):
super().__init__()
for name, parameter in backbone.named_parameters():
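            # Note the operator precedence below: a parameter is frozen either when the
            # backbone is not trained at all, or when it does not belong to layer2/3/4
            # (so only the later ResNet stages can ever be fine-tuned).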
if not train_backbone or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name:
parameter.requires_grad_(False)
if return_interm_layers:
return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"}
else:
return_layers = {'layer4': "0"}
self.body = backbone
self.num_channels = num_channels
    def forward(self, tensor_list: NestedTensor):
        # Restored from the original DETR code: assumes self.body maps an input batch to
        # a dict of named feature maps (as torchvision's IntermediateLayerGetter does).
        xs = self.body(tensor_list.tensors)
        out: Dict[str, NestedTensor] = {}
        for name, x in xs.items():
m = tensor_list.mask
assert m is not None
mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]
out[name] = NestedTensor(x, mask)
return out
class Backbone(BackboneBase):
"""ResNet backbone with frozen BatchNorm."""
def __init__(self, name: str,
train_backbone: bool,
return_interm_layers: bool,
dilation: bool):
backbone = create_model(name,pretrained=True)
num_channels = 320
super().__init__(backbone, train_backbone, num_channels, return_interm_layers)
class Joiner(nn.Sequential):
def __init__(self, backbone, position_embedding):
super().__init__(backbone, position_embedding)
    def forward(self, tensor_list: NestedTensor):
xs = self[0](tensor_list)
out: List[NestedTensor] = []
pos = []
for name, x in xs.items():
out.append(x)
# position encoding
pos.append(self[1](x).to(x.tensors.dtype))
return out, pos
def build_backbone(args):
position_embedding = build_position_encoding(args)
train_backbone = args.lr_backbone > 0
return_interm_layers = args.masks
backbone = create_model(args.backbone,pretrained=True,features_only=True,out_indices= (2, 3, 4))
#model = backbone
model = Joiner(backbone, position_embedding)
model.num_channels = 320
return model
| [
"torch.zeros",
"torch.ones"
] | 1.5.0 | Aliweka2020/EfficientDETR | 6557dca4e969d58f15555fe030a7ad7a0bacef7c |
1.7 | from typing import Dict, Optional, List, Any
from overrides import overrides
import torch
from allennlp.common.checks import check_dimensions_match
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.models.model import Model
from allennlp.modules import FeedForward
from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder
from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention
from allennlp.nn import InitializerApplicator
from allennlp.nn.util import get_text_field_mask, masked_softmax, weighted_sum
from allennlp.training.metrics import CategoricalAccuracy
@Model.register("decomposable_attention")
class DecomposableAttention(Model):
"""
This `Model` implements the Decomposable Attention model described in [A Decomposable
Attention Model for Natural Language Inference](https://api.semanticscholar.org/CorpusID:8495258)
by Parikh et al., 2016, with some optional enhancements before the decomposable attention
actually happens. Parikh's original model allowed for computing an "intra-sentence" attention
before doing the decomposable entailment step. We generalize this to any
[`Seq2SeqEncoder`](../modules/seq2seq_encoders/seq2seq_encoder.md) that can be applied to
the premise and/or the hypothesis before computing entailment.
The basic outline of this model is to get an embedded representation of each word in the
premise and hypothesis, align words between the two, compare the aligned phrases, and make a
final entailment decision based on this aggregated comparison. Each step in this process uses
a feedforward network to modify the representation.
Registered as a `Model` with name "decomposable_attention".
# Parameters
vocab : `Vocabulary`
text_field_embedder : `TextFieldEmbedder`
Used to embed the `premise` and `hypothesis` `TextFields` we get as input to the
model.
attend_feedforward : `FeedForward`
This feedforward network is applied to the encoded sentence representations before the
similarity matrix is computed between words in the premise and words in the hypothesis.
matrix_attention : `MatrixAttention`
This is the attention function used when computing the similarity matrix between words in
the premise and words in the hypothesis.
compare_feedforward : `FeedForward`
This feedforward network is applied to the aligned premise and hypothesis representations,
individually.
aggregate_feedforward : `FeedForward`
This final feedforward network is applied to the concatenated, summed result of the
`compare_feedforward` network, and its output is used as the entailment class logits.
premise_encoder : `Seq2SeqEncoder`, optional (default=`None`)
After embedding the premise, we can optionally apply an encoder. If this is `None`, we
will do nothing.
hypothesis_encoder : `Seq2SeqEncoder`, optional (default=`None`)
After embedding the hypothesis, we can optionally apply an encoder. If this is `None`,
we will use the `premise_encoder` for the encoding (doing nothing if `premise_encoder`
is also `None`).
initializer : `InitializerApplicator`, optional (default=`InitializerApplicator()`)
Used to initialize the model parameters.
"""
def __init__(
self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
attend_feedforward: FeedForward,
matrix_attention: MatrixAttention,
compare_feedforward: FeedForward,
aggregate_feedforward: FeedForward,
premise_encoder: Optional[Seq2SeqEncoder] = None,
hypothesis_encoder: Optional[Seq2SeqEncoder] = None,
initializer: InitializerApplicator = InitializerApplicator(),
**kwargs,
) -> None:
super().__init__(vocab, **kwargs)
self._text_field_embedder = text_field_embedder
self._attend_feedforward = TimeDistributed(attend_feedforward)
self._matrix_attention = matrix_attention
self._compare_feedforward = TimeDistributed(compare_feedforward)
self._aggregate_feedforward = aggregate_feedforward
self._premise_encoder = premise_encoder
self._hypothesis_encoder = hypothesis_encoder or premise_encoder
self._num_labels = vocab.get_vocab_size(namespace="labels")
check_dimensions_match(
text_field_embedder.get_output_dim(),
attend_feedforward.get_input_dim(),
"text field embedding dim",
"attend feedforward input dim",
)
check_dimensions_match(
aggregate_feedforward.get_output_dim(),
self._num_labels,
"final output dimension",
"number of labels",
)
self._accuracy = CategoricalAccuracy()
self._loss = torch.nn.CrossEntropyLoss()
initializer(self)
def forward( # type: ignore
self,
premise: TextFieldTensors,
hypothesis: TextFieldTensors,
label: torch.IntTensor = None,
metadata: List[Dict[str, Any]] = None,
) -> Dict[str, torch.Tensor]:
"""
# Parameters
premise : `TextFieldTensors`
From a `TextField`
hypothesis : `TextFieldTensors`
From a `TextField`
label : `torch.IntTensor`, optional (default = `None`)
From a `LabelField`
metadata : `List[Dict[str, Any]]`, optional (default = `None`)
Metadata containing the original tokenization of the premise and
hypothesis with 'premise_tokens' and 'hypothesis_tokens' keys respectively.
# Returns
An output dictionary consisting of:
label_logits : `torch.FloatTensor`
A tensor of shape `(batch_size, num_labels)` representing unnormalised log
probabilities of the entailment label.
label_probs : `torch.FloatTensor`
A tensor of shape `(batch_size, num_labels)` representing probabilities of the
entailment label.
loss : `torch.FloatTensor`, optional
A scalar loss to be optimised.
"""
embedded_premise = self._text_field_embedder(premise)
embedded_hypothesis = self._text_field_embedder(hypothesis)
premise_mask = get_text_field_mask(premise)
hypothesis_mask = get_text_field_mask(hypothesis)
if self._premise_encoder:
embedded_premise = self._premise_encoder(embedded_premise, premise_mask)
if self._hypothesis_encoder:
embedded_hypothesis = self._hypothesis_encoder(embedded_hypothesis, hypothesis_mask)
projected_premise = self._attend_feedforward(embedded_premise)
projected_hypothesis = self._attend_feedforward(embedded_hypothesis)
# Shape: (batch_size, premise_length, hypothesis_length)
similarity_matrix = self._matrix_attention(projected_premise, projected_hypothesis)
# Shape: (batch_size, premise_length, hypothesis_length)
p2h_attention = masked_softmax(similarity_matrix, hypothesis_mask)
# Shape: (batch_size, premise_length, embedding_dim)
attended_hypothesis = weighted_sum(embedded_hypothesis, p2h_attention)
# Shape: (batch_size, hypothesis_length, premise_length)
h2p_attention = masked_softmax(similarity_matrix.transpose(1, 2).contiguous(), premise_mask)
# Shape: (batch_size, hypothesis_length, embedding_dim)
attended_premise = weighted_sum(embedded_premise, h2p_attention)
premise_compare_input = torch.cat([embedded_premise, attended_hypothesis], dim=-1)
hypothesis_compare_input = torch.cat([embedded_hypothesis, attended_premise], dim=-1)
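        # "Compare" step of the decomposable attention model: each token representation
        # is concatenated with the attention-weighted summary of the other sentence, and
        # the comparisons are then summed over tokens before the final classifier.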
compared_premise = self._compare_feedforward(premise_compare_input)
compared_premise = compared_premise * premise_mask.unsqueeze(-1)
# Shape: (batch_size, compare_dim)
compared_premise = compared_premise.sum(dim=1)
compared_hypothesis = self._compare_feedforward(hypothesis_compare_input)
compared_hypothesis = compared_hypothesis * hypothesis_mask.unsqueeze(-1)
# Shape: (batch_size, compare_dim)
compared_hypothesis = compared_hypothesis.sum(dim=1)
aggregate_input = torch.cat([compared_premise, compared_hypothesis], dim=-1)
label_logits = self._aggregate_feedforward(aggregate_input)
label_probs = torch.nn.functional.softmax(label_logits, dim=-1)
output_dict = {
"label_logits": label_logits,
"label_probs": label_probs,
"h2p_attention": h2p_attention,
"p2h_attention": p2h_attention,
}
if label is not None:
loss = self._loss(label_logits, label.long().view(-1))
self._accuracy(label_logits, label)
output_dict["loss"] = loss
if metadata is not None:
output_dict["premise_tokens"] = [x["premise_tokens"] for x in metadata]
output_dict["hypothesis_tokens"] = [x["hypothesis_tokens"] for x in metadata]
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {"accuracy": self._accuracy.get_metric(reset)}
@overrides
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
"""
Does a simple argmax over the probabilities, converts index to string label, and
add `"label"` key to the dictionary with the result.
"""
predictions = output_dict["label_probs"]
if predictions.dim() == 2:
predictions_list = [predictions[i] for i in range(predictions.shape[0])]
else:
predictions_list = [predictions]
classes = []
for prediction in predictions_list:
label_idx = prediction.argmax(dim=-1).item()
label_str = self.vocab.get_index_to_token_vocabulary("labels").get(
label_idx, str(label_idx)
)
classes.append(label_str)
output_dict["label"] = classes
return output_dict
default_predictor = "textual_entailment"
| [
"torch.cat",
"torch.nn.CrossEntropyLoss",
"torch.nn.functional.softmax"
] | 1.7.0 | zhiyangxu-umass/allennlp-models | d922f7a8075387ebed1a3e38e588345f706d3f02 |
1.7 | import warnings
from typing import Dict, List, Any, Union
from overrides import overrides
import torch
from torch.nn.modules import Linear, Dropout
import torch.nn.functional as F
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.bert.modeling_bert import BertModel
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.models.model import Model
from allennlp.nn import InitializerApplicator, util
from allennlp.nn.util import get_text_field_mask, sequence_cross_entropy_with_logits
from allennlp.nn.util import get_lengths_from_binary_sequence_mask, viterbi_decode
from allennlp_models.structured_prediction.metrics.srl_eval_scorer import (
DEFAULT_SRL_EVAL_PATH,
SrlEvalScorer,
)
@Model.register("srl_bert")
class SrlBert(Model):
"""
A BERT based model [Simple BERT Models for Relation Extraction and Semantic Role Labeling (Shi et al, 2019)]
(https://arxiv.org/abs/1904.05255) with some modifications (no additional parameters apart from a linear
classification layer), which is currently the state-of-the-art single model for English PropBank SRL
(Newswire sentences).
# Parameters
vocab : `Vocabulary`, required
A Vocabulary, required in order to compute sizes for input/output projections.
bert_model : `Union[str, Dict[str, Any], BertModel]`, required.
A string describing the BERT model to load, a BERT config in the form of a dictionary,
or an already constructed BertModel.
!!! Note
If you pass a config `bert_model` (a dictionary), pretrained weights will
not be cached and loaded! This is ideal if you're loading this model from an
AllenNLP archive since the weights you need will already be included in the
archive, but not what you want if you're training.
initializer : `InitializerApplicator`, optional (default=`InitializerApplicator()`)
Used to initialize the model parameters.
label_smoothing : `float`, optional (default = `0.0`)
Whether or not to use label smoothing on the labels when computing cross entropy loss.
ignore_span_metric : `bool`, optional (default = `False`)
Whether to calculate span loss, which is irrelevant when predicting BIO for Open Information Extraction.
srl_eval_path : `str`, optional (default=`DEFAULT_SRL_EVAL_PATH`)
The path to the srl-eval.pl script. By default, will use the srl-eval.pl included with allennlp,
which is located at allennlp/tools/srl-eval.pl . If `None`, srl-eval.pl is not used.
"""
def __init__(
self,
vocab: Vocabulary,
bert_model: Union[str, Dict[str, Any], BertModel],
embedding_dropout: float = 0.0,
initializer: InitializerApplicator = InitializerApplicator(),
label_smoothing: float = None,
ignore_span_metric: bool = False,
srl_eval_path: str = DEFAULT_SRL_EVAL_PATH,
**kwargs,
) -> None:
super().__init__(vocab, **kwargs)
if isinstance(bert_model, str):
self.bert_model = BertModel.from_pretrained(bert_model)
elif isinstance(bert_model, dict):
warnings.warn(
"Initializing BertModel without pretrained weights. This is fine if you're loading "
"from an AllenNLP archive, but not if you're training.",
UserWarning,
)
bert_config = BertConfig.from_dict(bert_model)
self.bert_model = BertModel(bert_config)
else:
self.bert_model = bert_model
self.num_classes = self.vocab.get_vocab_size("labels")
if srl_eval_path is not None:
# For the span based evaluation, we don't want to consider labels
# for verb, because the verb index is provided to the model.
self.span_metric = SrlEvalScorer(srl_eval_path, ignore_classes=["V"])
else:
self.span_metric = None
self.tag_projection_layer = Linear(self.bert_model.config.hidden_size, self.num_classes)
self.embedding_dropout = Dropout(p=embedding_dropout)
self._label_smoothing = label_smoothing
self.ignore_span_metric = ignore_span_metric
initializer(self)
def forward( # type: ignore
self,
tokens: TextFieldTensors,
verb_indicator: torch.Tensor,
metadata: List[Any],
tags: torch.LongTensor = None,
):
"""
# Parameters
tokens : `TextFieldTensors`, required
The output of `TextField.as_array()`, which should typically be passed directly to a
`TextFieldEmbedder`. For this model, this must be a `SingleIdTokenIndexer` which
indexes wordpieces from the BERT vocabulary.
verb_indicator: `torch.LongTensor`, required.
An integer `SequenceFeatureField` representation of the position of the verb
in the sentence. This should have shape (batch_size, num_tokens) and importantly, can be
all zeros, in the case that the sentence has no verbal predicate.
tags : `torch.LongTensor`, optional (default = `None`)
A torch tensor representing the sequence of integer gold class labels
of shape `(batch_size, num_tokens)`
metadata : `List[Dict[str, Any]]`, optional, (default = `None`)
metadata containing the original words in the sentence, the verb to compute the
frame for, and start offsets for converting wordpieces back to a sequence of words,
under 'words', 'verb' and 'offsets' keys, respectively.
# Returns
An output dictionary consisting of:
logits : `torch.FloatTensor`
A tensor of shape `(batch_size, num_tokens, tag_vocab_size)` representing
unnormalised log probabilities of the tag classes.
class_probabilities : `torch.FloatTensor`
A tensor of shape `(batch_size, num_tokens, tag_vocab_size)` representing
a distribution of the tag classes per word.
loss : `torch.FloatTensor`, optional
A scalar loss to be optimised.
"""
mask = get_text_field_mask(tokens)
bert_embeddings, _ = self.bert_model(
input_ids=util.get_token_ids_from_text_field_tensors(tokens),
token_type_ids=verb_indicator,
attention_mask=mask,
return_dict=False,
)
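        # The verb indicator is passed as token_type_ids, so BERT's segment embedding
        # marks the predicate position, following Shi & Lin (2019).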
embedded_text_input = self.embedding_dropout(bert_embeddings)
batch_size, sequence_length, _ = embedded_text_input.size()
logits = self.tag_projection_layer(embedded_text_input)
reshaped_log_probs = logits.view(-1, self.num_classes)
class_probabilities = F.softmax(reshaped_log_probs, dim=-1).view(
[batch_size, sequence_length, self.num_classes]
)
output_dict = {"logits": logits, "class_probabilities": class_probabilities}
# We need to retain the mask in the output dictionary
# so that we can crop the sequences to remove padding
# when we do viterbi inference in self.make_output_human_readable.
output_dict["mask"] = mask
# We add in the offsets here so we can compute the un-wordpieced tags.
words, verbs, offsets = zip(*[(x["words"], x["verb"], x["offsets"]) for x in metadata])
output_dict["words"] = list(words)
output_dict["verb"] = list(verbs)
output_dict["wordpiece_offsets"] = list(offsets)
if tags is not None:
loss = sequence_cross_entropy_with_logits(
logits, tags, mask, label_smoothing=self._label_smoothing
)
if not self.ignore_span_metric and self.span_metric is not None and not self.training:
batch_verb_indices = [
example_metadata["verb_index"] for example_metadata in metadata
]
batch_sentences = [example_metadata["words"] for example_metadata in metadata]
# Get the BIO tags from make_output_human_readable()
# TODO (nfliu): This is kind of a hack, consider splitting out part
# of make_output_human_readable() to a separate function.
batch_bio_predicted_tags = self.make_output_human_readable(output_dict).pop("tags")
from allennlp_models.structured_prediction.models.srl import (
convert_bio_tags_to_conll_format,
)
batch_conll_predicted_tags = [
convert_bio_tags_to_conll_format(tags) for tags in batch_bio_predicted_tags
]
batch_bio_gold_tags = [
example_metadata["gold_tags"] for example_metadata in metadata
]
batch_conll_gold_tags = [
convert_bio_tags_to_conll_format(tags) for tags in batch_bio_gold_tags
]
self.span_metric(
batch_verb_indices,
batch_sentences,
batch_conll_predicted_tags,
batch_conll_gold_tags,
)
output_dict["loss"] = loss
print("begin:")
print(output_dict['words'][0])
print(output_dict["mask"][0])
print(output_dict['wordpiece_tags'][0])
print(metadata[0]['wordpieces'])
print(output_dict['wordpiece_offsets'][0])
print(tags[0])
print(batch_bio_predicted_tags[0])
print(batch_bio_gold_tags[0])
return output_dict
@overrides
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
"""
Does constrained viterbi decoding on class probabilities output in :func:`forward`. The
constraint simply specifies that the output tags must be a valid BIO sequence. We add a
`"tags"` key to the dictionary with the result.
NOTE: First, we decode a BIO sequence on top of the wordpieces. This is important; viterbi
decoding produces low quality output if you decode on top of word representations directly,
because the model gets confused by the 'missing' positions (which is sensible as it is trained
to perform tagging on wordpieces, not words).
Secondly, it's important that the indices we use to recover words from the wordpieces are the
start_offsets (i.e offsets which correspond to using the first wordpiece of words which are
tokenized into multiple wordpieces) as otherwise, we might get an ill-formed BIO sequence
when we select out the word tags from the wordpiece tags. This happens in the case that a word
is split into multiple word pieces, and then we take the last tag of the word, which might
        correspond to, e.g., I-V, which would not be allowed as it is not preceded by a B tag.
"""
all_predictions = output_dict["class_probabilities"]
sequence_lengths = get_lengths_from_binary_sequence_mask(output_dict["mask"]).data.tolist()
if all_predictions.dim() == 3:
predictions_list = [
all_predictions[i].detach().cpu() for i in range(all_predictions.size(0))
]
else:
predictions_list = [all_predictions]
wordpiece_tags = []
word_tags = []
transition_matrix = self.get_viterbi_pairwise_potentials()
start_transitions = self.get_start_transitions()
# **************** Different ********************
# We add in the offsets here so we can compute the un-wordpieced tags.
for predictions, length, offsets in zip(
predictions_list, sequence_lengths, output_dict["wordpiece_offsets"]
):
max_likelihood_sequence, _ = viterbi_decode(
predictions[:length], transition_matrix, allowed_start_transitions=start_transitions
)
tags = [
self.vocab.get_token_from_index(x, namespace="labels")
for x in max_likelihood_sequence
]
wordpiece_tags.append(tags)
word_tags.append([tags[i] for i in offsets])
output_dict["wordpiece_tags"] = wordpiece_tags
output_dict["tags"] = word_tags
return output_dict
def get_metrics(self, reset: bool = False):
if self.ignore_span_metric:
# Return an empty dictionary if ignoring the
# span metric
return {}
else:
metric_dict = self.span_metric.get_metric(reset=reset)
# This can be a lot of metrics, as there are 3 per class.
# we only really care about the overall metrics, so we filter for them here.
return {x: y for x, y in metric_dict.items() if "overall" in x}
def get_viterbi_pairwise_potentials(self):
"""
Generate a matrix of pairwise transition potentials for the BIO labels.
The only constraint implemented here is that I-XXX labels must be preceded
by either an identical I-XXX tag or a B-XXX tag. In order to achieve this
constraint, pairs of labels which do not satisfy this constraint have a
pairwise potential of -inf.
# Returns
transition_matrix : `torch.Tensor`
A `(num_labels, num_labels)` matrix of pairwise potentials.
"""
all_labels = self.vocab.get_index_to_token_vocabulary("labels")
num_labels = len(all_labels)
transition_matrix = torch.zeros([num_labels, num_labels])
for i, previous_label in all_labels.items():
for j, label in all_labels.items():
# I labels can only be preceded by themselves or
# their corresponding B tag.
if i != j and label[0] == "I" and not previous_label == "B" + label[1:]:
transition_matrix[i, j] = float("-inf")
return transition_matrix
def get_start_transitions(self):
"""
In the BIO sequence, we cannot start the sequence with an I-XXX tag.
This transition sequence is passed to viterbi_decode to specify this constraint.
# Returns
start_transitions : `torch.Tensor`
The pairwise potentials between a START token and
the first token of the sequence.
"""
all_labels = self.vocab.get_index_to_token_vocabulary("labels")
num_labels = len(all_labels)
start_transitions = torch.zeros(num_labels)
for i, label in all_labels.items():
if label[0] == "I":
start_transitions[i] = float("-inf")
return start_transitions
default_predictor = "semantic_role_labeling"
| [
"torch.zeros",
"torch.nn.modules.Dropout",
"torch.nn.modules.Linear",
"torch.nn.functional.softmax"
] | 1.7.0 | zhiyangxu-umass/allennlp-models | d922f7a8075387ebed1a3e38e588345f706d3f02 |
1.5 | import logging.config
import os
from pathlib import Path
import torch
from transformers import set_seed as hf_set_seed
# global variable: cache_root
cache_root = Path(os.getenv("FLAIR_CACHE_ROOT", Path(Path.home(), ".flair")))
# global variable: device
if torch.cuda.is_available():
device = torch.device("cuda:0")
else:
device = torch.device("cpu")
# global variable: version
__version__ = "0.11"
# global variable: arrow symbol
_arrow = " → "
from . import ( # noqa: E402 import after setting device
data,
models,
nn,
trainers,
visual,
)
logging.config.dictConfig(
{
"version": 1,
"disable_existing_loggers": False,
"formatters": {"standard": {"format": "%(asctime)-15s %(message)s"}},
"handlers": {
"console": {
"level": "INFO",
"class": "logging.StreamHandler",
"formatter": "standard",
"stream": "ext://sys.stdout",
}
},
"loggers": {"flair": {"handlers": ["console"], "level": "INFO", "propagate": False}},
}
)
logger = logging.getLogger("flair")
def set_seed(seed: int):
hf_set_seed(seed)
__all__ = [
"cache_root",
"device",
"__version__",
"logger",
"set_seed",
"data",
"models",
"nn",
"trainers",
"visual",
"datasets",
]
| [
"torch.device",
"torch.cuda.is_available"
] | 1.5.0 | chen-yuxuan/flair | 480d2c9afd66ab8d3bf40a676917e84dba3c4cee |
0.4 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
from .kernel import Kernel
from ..lazy import ToeplitzLazyTensor, KroneckerProductLazyTensor
from .. import settings
class GridKernel(Kernel):
r"""
If the input data :math:`X` are regularly spaced on a grid, then
`GridKernel` can dramatically speed up computatations for stationary kernel.
GridKernel exploits Toeplitz and Kronecker structure within the covariance matrix.
See `Fast kernel learning for multidimensional pattern extrapolation`_ for more info.
.. note::
`GridKernel` can only wrap **stationary kernels** (such as RBF, Matern,
Periodic, Spectral Mixture, etc.)
Args:
:attr:`base_kernel` (Kernel):
The kernel to speed up with grid methods.
:attr:`inducing_points` (Tensor, n x d):
This will be the set of points that lie on the grid.
:attr:`grid` (Tensor, k x d):
The exact grid points.
:attr:`active_dims` (tuple of ints, optional):
Passed down to the `base_kernel`.
.. _Fast kernel learning for multidimensional pattern extrapolation:
http://www.cs.cmu.edu/~andrewgw/manet.pdf
"""
def __init__(self, base_kernel, inducing_points, grid, active_dims=None):
super(GridKernel, self).__init__(active_dims=active_dims)
self.base_kernel = base_kernel
if inducing_points.ndimension() != 2:
raise RuntimeError("Inducing points should be 2 dimensional")
self.register_buffer("inducing_points", inducing_points.unsqueeze(0))
self.register_buffer("grid", grid)
def train(self, mode=True):
if hasattr(self, "_cached_kernel_mat"):
del self._cached_kernel_mat
return super(GridKernel, self).train(mode)
def update_inducing_points_and_grid(self, inducing_points, grid):
"""
Supply a new set of `inducing_points` and a new `grid` if they ever change.
"""
self.inducing_points.detach().resize_(inducing_points.size()).copy_(inducing_points)
self.grid.detach().resize_(grid.size()).copy_(grid)
if hasattr(self, "_cached_kernel_mat"):
del self._cached_kernel_mat
return self
def forward(self, x1, x2, diag=False, batch_dims=None, **params):
if not torch.equal(x1, self.inducing_points) or not torch.equal(x2, self.inducing_points):
raise RuntimeError("The kernel should only receive the inducing points as input")
if not self.training and hasattr(self, "_cached_kernel_mat"):
covar = self._cached_kernel_mat
else:
n_dim = x1.size(-1)
grid = self.grid.unsqueeze(0)
if settings.use_toeplitz.on():
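                # For a stationary kernel on a regularly spaced grid, k(x_i, x_j) depends
                # only on |i - j|, so each dimension's covariance matrix is Toeplitz and
                # is fully determined by its first column k(x_1, .).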
first_item = grid[:, 0:1]
covar_columns = self.base_kernel(first_item, grid, diag=False, batch_dims=(0, 2), **params)
covar_columns = covar_columns.evaluate().squeeze(-2)
if batch_dims == (0, 2):
covars = [ToeplitzLazyTensor(covar_columns)]
else:
covars = [ToeplitzLazyTensor(covar_columns[i : i + 1]) for i in range(n_dim)]
else:
full_covar = self.base_kernel(grid, grid, batch_dims=(0, 2), **params).evaluate_kernel()
if batch_dims == (0, 2):
covars = [full_covar]
else:
covars = [full_covar[i : i + 1] for i in range(n_dim)]
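            # Over the full product grid, the covariance factorises as the Kronecker
            # product of the per-dimension covariance matrices.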
if len(covars) > 1:
covar = KroneckerProductLazyTensor(*covars)
else:
covar = covars[0]
if not self.training:
self._cached_kernel_mat = covar
return covar
| [
"torch.equal"
] | 0.4.1 | konstantinklemmer/gpytorch | f1d947b340a188c398b6c6e610b6a839c61aa298 |
0.4 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
from ..lazy import LazyTensor
class NonLazyTensor(LazyTensor):
def __init__(self, tsr):
"""
Not a lazy tensor
Args:
- tsr (Tensor: matrix) a Tensor
"""
if not torch.is_tensor(tsr):
raise RuntimeError("NonLazyTensor must take a torch.Tensor; got {}".format(tsr.__class__.__name__))
super(NonLazyTensor, self).__init__(tsr)
self.tensor = tsr
def _matmul(self, rhs):
return torch.matmul(self.tensor, rhs)
def _t_matmul(self, rhs):
return torch.matmul(self.tensor.transpose(-1, -2), rhs)
def _quad_form_derivative(self, left_vecs, right_vecs):
if left_vecs.ndimension() < self.tensor.ndimension():
left_vecs = left_vecs.unsqueeze(-1)
right_vecs = right_vecs.unsqueeze(-1)
res = left_vecs.matmul(right_vecs.transpose(-1, -2))
return (res,)
def _size(self):
return self.tensor.size()
def _transpose_nonbatch(self):
return NonLazyTensor(self.tensor.transpose(-1, -2))
def _batch_get_indices(self, batch_indices, left_indices, right_indices):
return self.tensor[batch_indices, left_indices, right_indices]
def _get_indices(self, left_indices, right_indices):
return self.tensor[left_indices, right_indices]
def _preconditioner(self):
# For a NonLazyTensor, it is intended to not use preconditioning, even when called for.
return None, None
def diag(self):
if self.tensor.ndimension() < 3:
return self.tensor.diag()
else:
shape = self.shape
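            # Build index vectors selecting element (b, i, i) for every batch b and row i,
            # then reshape back to (batch, n) to obtain per-batch diagonals.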
batch_iter = torch.arange(0, shape[0], dtype=torch.long, device=self.device)
row_col_iter = torch.arange(0, shape[-1], dtype=torch.long, device=self.device)
batch_iter = batch_iter.unsqueeze(1).repeat(1, shape[1]).view(-1)
row_col_iter = row_col_iter.unsqueeze(1).repeat(shape[0], 1).view(-1)
return self.tensor[batch_iter, row_col_iter, row_col_iter].view(shape[:-1])
def evaluate(self):
return self.tensor
def repeat(self, *sizes):
return NonLazyTensor(self.tensor.repeat(*sizes))
def __getitem__(self, index):
res = self.tensor[index]
if not isinstance(index, tuple):
index = (index,)
if len(index) >= self.ndimension() - 1:
row_index = index[self.ndimension() - 2]
if isinstance(row_index, int) or torch.is_tensor(row_index):
return res
if len(index) == self.ndimension():
col_index = index[self.ndimension() - 1]
if isinstance(col_index, int) or torch.is_tensor(col_index):
return res
return NonLazyTensor(res)
| [
"torch.is_tensor",
"torch.matmul",
"torch.arange"
] | 0.4.1 | konstantinklemmer/gpytorch | f1d947b340a188c398b6c6e610b6a839c61aa298 |
0.4 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import math
import torch
from .kernel import Kernel
class CosineKernel(Kernel):
r"""
Computes a covariance matrix based on the cosine kernel
between inputs :math:`\mathbf{x_1}` and :math:`\mathbf{x_2}`:
.. math::
\begin{equation*}
k_{\text{Cosine}}(\mathbf{x_1}, \mathbf{x_2}) = \cos \left(
\pi \Vert \mathbf{x_1} - \mathbf{x_2} \Vert_2 / p \right)
\end{equation*}
    where :math:`p` is the period length parameter.
Args:
:attr:`batch_size` (int, optional):
Set this if you want a separate lengthscale for each
batch of input data. It should be `b` if :attr:`x1` is a `b x n x d` tensor. Default: `1`
:attr:`active_dims` (tuple of ints, optional):
Set this if you want to compute the covariance of only a few input dimensions. The ints
corresponds to the indices of the dimensions. Default: `None`.
:attr:`log_period_length_prior` (Prior, optional):
Set this if you want to apply a prior to the period length parameter. Default: `None`
:attr:`eps` (float):
The minimum value that the lengthscale/period length can take
(prevents divide by zero errors). Default: `1e-6`.
Attributes:
:attr:`period_length` (Tensor):
The period length parameter. Size = `batch_size x 1 x 1`.
Example:
>>> x = torch.randn(10, 5)
>>> # Non-batch: Simple option
>>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.CosineKernel())
>>>
>>> batch_x = torch.randn(2, 10, 5)
>>> # Batch: Simple option
>>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.CosineKernel())
>>> # Batch: different lengthscale for each batch
>>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.CosineKernel(batch_size=2))
>>> covar = covar_module(x) # Output: LazyVariable of size (2 x 10 x 10)
"""
def __init__(self, active_dims=None, batch_size=1, log_period_length_prior=None, eps=1e-6):
super(CosineKernel, self).__init__(has_lengthscale=False, active_dims=active_dims)
self.eps = eps
self.register_parameter(
name="log_period_length",
parameter=torch.nn.Parameter(torch.zeros(batch_size, 1, 1)),
prior=log_period_length_prior,
)
@property
def period_length(self):
return self.log_period_length.exp().clamp(self.eps, 1e5)
def forward(self, x1, x2, **params):
x1_ = x1.div(self.period_length)
x2_ = x2.div(self.period_length)
x1_, x2_ = self._create_input_grid(x1_, x2_, **params)
diff = torch.norm((x1_ - x2_).abs(), 2, -1)
res = torch.cos(diff.mul(math.pi))
return res
| [
"torch.zeros"
] | 0.4.1 | konstantinklemmer/gpytorch | f1d947b340a188c398b6c6e610b6a839c61aa298 |
1.2 | import copy
import numpy as np
import torch
import os
import sys
sys.path.insert(0, os.environ['ALFRED_ROOT'])
from agents.utils.misc import extract_admissible_commands
def evaluate_vision_dagger(env, agent, num_games, debug=False):
env.seed(42)
agent.eval()
episode_no = 0
res_points, res_steps, res_gcs = [], [], []
res_info = []
with torch.no_grad():
while(True):
if episode_no >= num_games:
break
obs, infos = env.reset()
game_names = infos["extra.gamefile"]
batch_size = len(obs)
agent.init(batch_size)
previous_dynamics = None
execute_actions = []
prev_step_dones, prev_rewards = [], []
for _ in range(batch_size):
execute_actions.append("restart")
prev_step_dones.append(0.0)
prev_rewards.append(0.0)
observation_strings = list(obs)
observation_strings = agent.preprocess_observation(observation_strings)
task_desc_strings, observation_strings = agent.get_task_and_obs(observation_strings)
first_sight_strings = copy.deepcopy(observation_strings)
agent.observation_pool.push_first_sight(first_sight_strings)
if agent.action_space == "exhaustive":
action_candidate_list = [extract_admissible_commands(intro, obs) for intro, obs in zip(first_sight_strings, observation_strings)]
else:
action_candidate_list = list(infos["admissible_commands"])
action_candidate_list = agent.preprocess_action_candidates(action_candidate_list)
# extract exploration frame features
if agent.use_exploration_frame_feats:
exploration_frames = env.get_exploration_frames()
exploration_frame_feats = agent.extract_exploration_frame_feats(exploration_frames)
if debug:
print(first_sight_strings[0])
print(task_desc_strings[0])
still_running_mask = []
sequence_game_points = []
goal_condition_points = []
print_actions = []
report = agent.report_frequency > 0 and (episode_no % agent.report_frequency <= (episode_no - batch_size) % agent.report_frequency)
for step_no in range(agent.max_nb_steps_per_episode):
# get visual features
current_frames = env.get_frames()
observation_feats = agent.extract_visual_features(current_frames)
# add exploration features if specified
if agent.use_exploration_frame_feats:
observation_feats = [torch.cat([ef, obs], dim=0) for ef, obs in zip(exploration_frame_feats, observation_feats)]
# predict actions
if agent.action_space == "generation":
execute_actions, current_dynamics = agent.command_generation_greedy_generation(observation_feats, task_desc_strings, previous_dynamics)
else:
raise NotImplementedError()
obs, _, dones, infos = env.step(execute_actions)
scores = [float(item) for item in infos["won"]]
gcs =[float(item) for item in infos["goal_condition_success_rate"]] if "goal_condition_success_rate" in infos else [0.0]*batch_size
dones = [float(item) for item in dones]
if debug:
print(execute_actions[0])
print(obs[0])
if agent.action_space == "exhaustive":
action_candidate_list = [extract_admissible_commands(intro, obs) for intro, obs in zip(first_sight_strings, observation_strings)]
else:
action_candidate_list = list(infos["admissible_commands"])
action_candidate_list = agent.preprocess_action_candidates(action_candidate_list)
previous_dynamics = current_dynamics
if step_no == agent.max_nb_steps_per_episode - 1:
# terminate the game because DQN requires one extra step
dones = [1.0 for _ in dones]
still_running = [1.0 - float(item) for item in prev_step_dones] # list of float
prev_step_dones = dones
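                # still_running is derived from the *previous* step's done flags, so the
                # transition that finishes a game is still counted once before masking.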
step_rewards = [float(curr) - float(prev) for curr, prev in zip(scores, prev_rewards)] # list of float
prev_rewards = scores
sequence_game_points.append(step_rewards)
goal_condition_points.append(gcs)
still_running_mask.append(still_running)
print_actions.append(execute_actions[0] if still_running[0] else "--")
# if all ended, break
if np.sum(still_running) == 0:
break
game_steps = np.sum(np.array(still_running_mask), 0).tolist() # batch
game_points = np.max(np.array(sequence_game_points), 0).tolist() # batch
game_gcs = np.max(np.array(goal_condition_points), 0).tolist() # batch
for i in range(batch_size):
if len(res_points) >= num_games:
break
res_points.append(game_points[i])
res_gcs.append(game_gcs[i])
res_steps.append(game_steps[i])
res_info.append("/".join(game_names[i].split("/")[-3:-1]) + ", score: " + str(game_points[i]) + ", step: " + str(game_steps[i]))
# finish game
agent.finish_of_episode(episode_no, batch_size)
episode_no += batch_size
if not report:
continue
print("Model: {:s} | Episode: {:3d} | {:s} | game points: {:2.3f} | game goal-condition points: {:2.3f} | game steps: {:2.3f}".format(agent.experiment_tag, episode_no, game_names[0], np.mean(res_points), np.mean(res_gcs), np.mean(res_steps)))
# print(game_id + ": " + " | ".join(print_actions))
print(" | ".join(print_actions))
average_points, average_gc_points, average_steps = np.mean(res_points), np.mean(res_gcs), np.mean(res_steps)
print("================================================")
print("eval game points: " + str(average_points) + ", eval game goal-condition points : " + str(average_gc_points) + ", eval game steps: " + str(average_steps))
for item in res_info:
print(item)
return {
'average_points': average_points,
'average_goal_condition_points': average_gc_points,
'average_steps': average_steps,
'res_points': res_points,
'res_gcs': res_gcs,
'res_steps': res_steps,
'res_info': res_info
}
| [
"torch.cat",
"torch.no_grad"
] | 1.2.0 | 594zyc/alfworld | 704922821fb0a2ff92dfc3f3a5033ba498cb3932 |
1.4 | import torch
import torch.nn as nn
from args import get_parser
# read parser
parser = get_parser()
args = parser.parse_args()
class Norm(nn.Module):
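    # L2-normalises the input along `dim`, clamping the norm at `eps` to avoid division
    # by zero (equivalent in effect to F.normalize).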
def forward(self, input, p=2, dim=1, eps=1e-12):
return input / input.norm(p, dim, keepdim=True).clamp(min=eps).expand_as(input)
class LstmFlatten(nn.Module):
def forward(self, x):
return x[0].squeeze(1)
# coord network
class CoordNet(nn.Module):
def __init__(self, tags=args.cell_dim, dropout=args.dropout):
super(CoordNet, self).__init__()
self.coord_net = nn.Sequential(
nn.Linear(args.emb_dim, args.emb_dim),
nn.ReLU(),
nn.Flatten(),
nn.Dropout(dropout),
nn.Linear(args.emb_dim, tags)
)
def forward(self, x):
return self.coord_net(x.unsqueeze(1))
# embed images
class LearnImages(nn.Module):
def __init__(self):
super(LearnImages, self).__init__()
self.embedding = nn.Sequential(
nn.Conv1d(in_channels=args.img_dim, out_channels=args.emb_dim, kernel_size=1),
nn.Flatten(),
nn.ReLU(),
nn.Linear(args.emb_dim, args.emb_dim),
nn.Tanh(),
Norm()
)
def forward(self, x):
return self.embedding(x.unsqueeze(2))
# embed summaries
class LearnSummaries(nn.Module):
def __init__(self):
super(LearnSummaries, self).__init__()
self.embedding = nn.Sequential(
nn.LSTM(input_size=args.smr_dim, hidden_size=args.emb_dim, bidirectional=False, batch_first=True),
LstmFlatten(),
nn.ReLU(),
nn.Linear(args.emb_dim, args.emb_dim),
nn.Tanh(),
Norm()
)
def forward(self, x):
return self.embedding(x.unsqueeze(1))
# embed triples
class LearnClasses(nn.Module):
def __init__(self, dropout=args.dropout):
super(LearnClasses, self).__init__()
self.embedding = nn.Sequential(
nn.LSTM(input_size=args.smr_dim, hidden_size=args.emb_dim, bidirectional=False, batch_first=True),
LstmFlatten(),
nn.ReLU(),
nn.Dropout(dropout),
nn.Linear(args.emb_dim, args.emb_dim),
Norm()
)
def forward(self, x):
return self.embedding(x.unsqueeze(1))
# MLM Baseline model
class MLMBaseline(nn.Module):
def __init__(self):
super(MLMBaseline, self).__init__()
self.learn_img = LearnImages()
self.learn_sum = LearnSummaries()
self.coord_net = CoordNet()
self.learn_cls = LearnClasses()
self.fc1 = torch.nn.Linear(args.emb_dim + args.emb_dim, args.emb_dim)
self.fc2 = torch.nn.Linear(args.cell_dim + args.cell_dim, args.cell_dim)
def forward(self, image, summary, triple):
# input embeddings
img_emb = self.learn_img(image)
sum_emb = self.learn_sum(summary)
# coord embedding
img_coord = self.coord_net(img_emb)
sum_coord = self.coord_net(sum_emb)
        # task ir: pair the image embedding with the fused summary+triple embedding
cls_emb = self.learn_cls(triple)
sum_cls = torch.cat((sum_emb, cls_emb), 1)
sum_cls = self.fc1(sum_cls)
ir = [img_emb, sum_cls]
        # task le: pair the image cell prediction with the fused summary+triple cell prediction
cls_coord = self.coord_net(cls_emb)
sum_cls_coord = torch.cat((sum_coord, cls_coord), 1)
sum_cls_coord = self.fc2(sum_cls_coord)
le = [img_coord, sum_cls_coord]
return {
'ir': ir,
'le': le
}
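# Hedged usage sketch (editorial, not part of the original repo): a rough
# illustration of how MLMBaseline could be exercised with dummy tensors,
# assuming args.img_dim, args.smr_dim, args.emb_dim and args.cell_dim have been
# populated by get_parser(). 'ir' and 'le' presumably correspond to the
# retrieval and location-estimation tasks of the MLM benchmark.
#
#   model = MLMBaseline()
#   image = torch.rand(8, args.img_dim)    # batch of 8 image feature vectors
#   summary = torch.rand(8, args.smr_dim)  # batch of 8 summary feature vectors
#   triple = torch.rand(8, args.smr_dim)   # batch of 8 triple feature vectors
#   out = model(image, summary, triple)
#   img_emb, sum_cls = out['ir']            # each of shape (8, args.emb_dim)
#   img_coord, sum_cls_coord = out['le']    # each of shape (8, args.cell_dim)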
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.Dropout",
"torch.nn.LSTM",
"torch.nn.Conv1d",
"torch.nn.Tanh",
"torch.nn.ReLU",
"torch.nn.Flatten"
] | 1.4.0 | GOALCLEOPATRA/MLM | 331fd42f8f1f16ad990e66f9f0f873a824f8f849 |
1.7 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
from dataclasses import fields
from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, Union
from weakref import proxy
import torch
from torch import optim
from torch.optim import Optimizer
import pytorch_lightning as pl
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.model_helpers import is_overridden
from pytorch_lightning.utilities.rank_zero import rank_zero_warn
from pytorch_lightning.utilities.types import _Stateful, LRSchedulerConfig, LRSchedulerTypeTuple, ReduceLROnPlateau
def do_nothing_closure() -> None:
return
class LightningOptimizer:
"""This class is used to wrap the user optimizers and handle properly the backward and optimizer_step logic
across accelerators, AMP, accumulate_grad_batches."""
def __init__(self, optimizer: Optimizer):
# copy most of the `Optimizer` methods into this instance. `__del__` is skipped in case the optimizer has
# implemented custom logic which we would not want to call on destruction of the `LightningOptimizer`
self.__dict__ = {k: v for k, v in optimizer.__dict__.items() if k not in ("step", "__del__")}
# For Horovod
if hasattr(optimizer, "skip_synchronize"):
self.__class__ = type(
"Lightning" + optimizer.__class__.__name__, (self.__class__, optimizer.__class__.__bases__[0]), {}
)
self.skip_synchronize = optimizer.skip_synchronize
self.synchronize = optimizer.synchronize
else:
self.__class__ = type("Lightning" + optimizer.__class__.__name__, (self.__class__, optimizer.__class__), {})
self._optimizer = optimizer
self._strategy: Optional[pl.strategies.Strategy] = None
self._optimizer_idx = 0
@property
def optimizer(self) -> Optimizer:
return self._optimizer
@classmethod
def _to_lightning_optimizer(
cls, optimizer: Union[Optimizer, "LightningOptimizer"], strategy: "pl.strategies.Strategy", opt_idx: int
) -> "LightningOptimizer":
if isinstance(optimizer, LightningOptimizer):
# the user could return a `LightningOptimizer` from `configure_optimizers`, see test:
# tests/core/test_lightning_optimizer.py::test_lightning_optimizer[False]
lightning_optimizer = optimizer
else:
lightning_optimizer = cls(optimizer)
lightning_optimizer._strategy = proxy(strategy)
lightning_optimizer._optimizer_idx = opt_idx
return lightning_optimizer
@contextmanager
def toggle_model(self, sync_grad: bool = True) -> Generator[None, None, None]:
"""This function is just a helper for advanced users.
        Consider the current optimizer as A and all other optimizers as B.
        Toggling means that all parameters from B that are exclusive to A will have ``requires_grad`` set to False.
When performing gradient accumulation, there is no need to perform grad synchronization
during the accumulation phase.
Setting `sync_grad` to False will block this synchronization and improve performance.
"""
# local import here to avoid circular import
from pytorch_lightning.loops.utilities import _block_parallel_sync_behavior
assert self._strategy is not None
lightning_module = self._strategy.lightning_module
assert lightning_module is not None
with _block_parallel_sync_behavior(self._strategy, block=(not sync_grad)):
lightning_module.toggle_optimizer(self, self._optimizer_idx)
yield
lightning_module.untoggle_optimizer(self._optimizer_idx)
def step(self, closure: Optional[Callable[[], Any]] = None, **kwargs: Any) -> None:
"""Performs a single optimization step (parameter update).
Args:
closure: An optional optimizer_closure.
kwargs: Any additional arguments to the ``optimizer.step()`` call.
Example::
# Scenario for a GAN using manual optimization
def training_step(...):
opt_gen, opt_dis = self.optimizers()
...
# compute generator loss
loss_gen = self.compute_generator_loss(...)
# zero_grad needs to be called before backward
opt_gen.zero_grad()
self.manual_backward(loss_gen)
opt_gen.step()
# compute discriminator loss
loss_dis = self.compute_discriminator_loss(...)
# zero_grad needs to be called before backward
opt_dis.zero_grad()
self.manual_backward(loss_dis)
opt_dis.step()
# A more advanced example
def training_step(self, batch, batch_idx, ...):
opt_gen, opt_dis = self.optimizers()
...
accumulated_grad_batches = batch_idx % 2 == 0
# compute generator loss
def closure_gen():
loss_gen = self.compute_generator_loss(...)
self.manual_backward(loss_gen)
if accumulated_grad_batches:
opt_gen.zero_grad()
with opt_gen.toggle_model(sync_grad=accumulated_grad_batches):
opt_gen.step(closure=closure_gen)
def closure_dis():
loss_dis = self.compute_discriminator_loss(...)
self.manual_backward(loss_dis)
if accumulated_grad_batches:
opt_dis.zero_grad()
with opt_dis.toggle_model(sync_grad=accumulated_grad_batches):
opt_dis.step(closure=closure_dis)
"""
if closure is None:
closure = do_nothing_closure
profiler_action = "optimizer_step_without_closure"
elif not callable(closure):
raise MisconfigurationException("When `optimizer.step(closure)` is called, the closure should be callable")
else:
profiler_action = "optimizer_step_with_closure"
profiler_action += f"_{self._optimizer_idx}"
assert self._strategy is not None
assert self._strategy.lightning_module is not None
with self._strategy.lightning_module.trainer.profiler.profile(profiler_action):
self._strategy.optimizer_step(self._optimizer, self._optimizer_idx, closure, **kwargs)
def _init_optimizers_and_lr_schedulers(
model: "pl.LightningModule",
) -> Tuple[List[Optimizer], List[LRSchedulerConfig], List[int]]:
"""Calls `LightningModule.configure_optimizers` and parses and validates the output."""
optim_conf = model.trainer._call_lightning_module_hook("configure_optimizers", pl_module=model)
if optim_conf is None:
rank_zero_warn(
"`LightningModule.configure_optimizers` returned `None`, this fit will run with no optimizer",
)
optim_conf = _MockOptimizer()
optimizers, lr_schedulers, optimizer_frequencies, monitor = _configure_optimizers(optim_conf)
lr_scheduler_configs = (
_configure_schedulers_automatic_opt(lr_schedulers, monitor)
if model.automatic_optimization
else _configure_schedulers_manual_opt(lr_schedulers)
)
_set_scheduler_opt_idx(optimizers, lr_scheduler_configs)
_validate_scheduler_api(lr_scheduler_configs, model)
return optimizers, lr_scheduler_configs, optimizer_frequencies
def _configure_optimizers(
optim_conf: Union[Dict[str, Any], List, Optimizer, Tuple]
) -> Tuple[List, List, List, Optional[str]]:
optimizers, lr_schedulers, optimizer_frequencies = [], [], []
monitor = None
# single output, single optimizer
if isinstance(optim_conf, Optimizer):
optimizers = [optim_conf]
# two lists, optimizer + lr schedulers
elif (
isinstance(optim_conf, (list, tuple))
and len(optim_conf) == 2
and isinstance(optim_conf[0], list)
and all(isinstance(opt, Optimizer) for opt in optim_conf[0])
):
opt, sch = optim_conf
optimizers = opt
lr_schedulers = sch if isinstance(sch, list) else [sch]
# single dictionary
elif isinstance(optim_conf, dict):
_validate_optim_conf(optim_conf)
optimizers = [optim_conf["optimizer"]]
monitor = optim_conf.get("monitor", None)
lr_schedulers = [optim_conf["lr_scheduler"]] if "lr_scheduler" in optim_conf else []
# multiple dictionaries
elif isinstance(optim_conf, (list, tuple)) and all(isinstance(d, dict) for d in optim_conf):
for opt_dict in optim_conf:
_validate_optim_conf(opt_dict)
optimizers = [opt_dict["optimizer"] for opt_dict in optim_conf]
scheduler_dict = (
lambda scheduler, opt_idx: dict(scheduler, opt_idx=opt_idx)
if isinstance(scheduler, dict)
else {"scheduler": scheduler, "opt_idx": opt_idx}
)
lr_schedulers = [
scheduler_dict(opt_dict["lr_scheduler"], opt_idx)
for opt_idx, opt_dict in enumerate(optim_conf)
if "lr_scheduler" in opt_dict
]
optimizer_frequencies = [
opt_dict["frequency"] for opt_dict in optim_conf if opt_dict.get("frequency", None) is not None
]
# assert that if frequencies are present, they are given for all optimizers
if optimizer_frequencies and len(optimizer_frequencies) != len(optimizers):
raise ValueError("A frequency must be given to each optimizer.")
# single list or tuple, multiple optimizer
elif isinstance(optim_conf, (list, tuple)) and all(isinstance(opt, Optimizer) for opt in optim_conf):
optimizers = list(optim_conf)
# unknown configuration
else:
raise MisconfigurationException(
"Unknown configuration for model optimizers."
" Output from `model.configure_optimizers()` should be one of:\n"
" * `Optimizer`\n"
" * [`Optimizer`]\n"
" * ([`Optimizer`], [`_LRScheduler`])\n"
' * {"optimizer": `Optimizer`, (optional) "lr_scheduler": `_LRScheduler`}\n'
' * A list of the previously described dict format, with an optional "frequency" key (int)'
)
return optimizers, lr_schedulers, optimizer_frequencies, monitor
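# Editorial illustration (not part of the upstream file): the branches handled by
# `_configure_optimizers` above correspond to the return shapes a LightningModule's
# `configure_optimizers` may use, e.g.:
#
#   opt = torch.optim.Adam(model.parameters(), lr=1e-3)
#   sched = torch.optim.lr_scheduler.StepLR(opt, step_size=10)
#   return opt                                                           # single optimizer
#   return [opt], [sched]                                                # two lists
#   return {"optimizer": opt, "lr_scheduler": sched}                     # single dict
#   return [{"optimizer": opt, "lr_scheduler": sched, "frequency": 1}]   # list of dicts
#   return [opt]                                                         # list of optimizers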
def _configure_schedulers_automatic_opt(schedulers: list, monitor: Optional[str]) -> List[LRSchedulerConfig]:
"""Convert each scheduler into `LRSchedulerConfig` with relevant information, when using automatic
optimization."""
lr_scheduler_configs = []
for scheduler in schedulers:
if isinstance(scheduler, dict):
# check provided keys
supported_keys = {field.name for field in fields(LRSchedulerConfig)}
extra_keys = scheduler.keys() - supported_keys
if extra_keys:
rank_zero_warn(
f"Found unsupported keys in the lr scheduler dict: {extra_keys}."
" HINT: remove them from the output of `configure_optimizers`.",
category=RuntimeWarning,
)
scheduler = {k: v for k, v in scheduler.items() if k in supported_keys}
if "scheduler" not in scheduler:
raise MisconfigurationException(
'The lr scheduler dict must have the key "scheduler" with its item being an lr scheduler'
)
if "interval" in scheduler and scheduler["interval"] not in ("step", "epoch"):
raise MisconfigurationException(
'The "interval" key in lr scheduler dict must be "step" or "epoch"'
f' but is "{scheduler["interval"]}"'
)
scheduler["reduce_on_plateau"] = isinstance(scheduler["scheduler"], optim.lr_scheduler.ReduceLROnPlateau)
if scheduler["reduce_on_plateau"] and scheduler.get("monitor", None) is None:
raise MisconfigurationException(
"The lr scheduler dict must include a monitor when a `ReduceLROnPlateau` scheduler is used."
' For example: {"optimizer": optimizer, "lr_scheduler":'
' {"scheduler": scheduler, "monitor": "your_loss"}}'
)
is_one_cycle = isinstance(scheduler["scheduler"], optim.lr_scheduler.OneCycleLR)
if is_one_cycle and scheduler.get("interval", "epoch") == "epoch":
rank_zero_warn(
"A `OneCycleLR` scheduler is using 'interval': 'epoch'."
" Are you sure you didn't mean 'interval': 'step'?",
category=RuntimeWarning,
)
config = LRSchedulerConfig(**scheduler)
elif isinstance(scheduler, ReduceLROnPlateau):
if monitor is None:
raise MisconfigurationException(
"`configure_optimizers` must include a monitor when a `ReduceLROnPlateau`"
" scheduler is used. For example:"
' {"optimizer": optimizer, "lr_scheduler": scheduler, "monitor": "metric_to_track"}'
)
config = LRSchedulerConfig(scheduler, reduce_on_plateau=True, monitor=monitor)
else:
config = LRSchedulerConfig(scheduler)
lr_scheduler_configs.append(config)
return lr_scheduler_configs
def _configure_schedulers_manual_opt(schedulers: list) -> List[LRSchedulerConfig]:
"""Convert each scheduler into `LRSchedulerConfig` structure with relevant information, when using manual
optimization."""
lr_scheduler_configs = []
for scheduler in schedulers:
if isinstance(scheduler, dict):
invalid_keys = {"interval", "frequency", "reduce_on_plateau", "monitor", "strict"}
keys_to_warn = [k for k in scheduler.keys() if k in invalid_keys]
if keys_to_warn:
rank_zero_warn(
f"The lr scheduler dict contains the key(s) {keys_to_warn}, but the keys will be ignored."
" You need to call `lr_scheduler.step()` manually in manual optimization.",
category=RuntimeWarning,
)
config = LRSchedulerConfig(**{key: scheduler[key] for key in scheduler if key not in invalid_keys})
else:
config = LRSchedulerConfig(scheduler)
lr_scheduler_configs.append(config)
return lr_scheduler_configs
def _validate_scheduler_api(lr_scheduler_configs: List[LRSchedulerConfig], model: "pl.LightningModule") -> None:
for config in lr_scheduler_configs:
scheduler = config.scheduler
if not isinstance(scheduler, _Stateful):
raise TypeError(
f"The provided lr scheduler `{scheduler.__class__.__name__}` is invalid."
" It should have `state_dict` and `load_state_dict` methods defined."
)
if not isinstance(scheduler, LRSchedulerTypeTuple) and not is_overridden("lr_scheduler_step", model):
raise MisconfigurationException(
f"The provided lr scheduler `{scheduler.__class__.__name__}` doesn't follow PyTorch's LRScheduler"
" API. You should override the `LightningModule.lr_scheduler_step` hook with your own logic if"
" you are using a custom LR scheduler."
)
def _set_scheduler_opt_idx(optimizers: List[Optimizer], lr_scheduler_configs: List[LRSchedulerConfig]) -> None:
for config in lr_scheduler_configs:
for opt_idx, opt in enumerate(optimizers):
if config.scheduler.optimizer is opt:
if config.opt_idx is not None and config.opt_idx != opt_idx:
raise MisconfigurationException(
"`opt_idx` set inside scheduler config does not match with the index"
" of the respective optimizer returned from `configure_optimizers`."
)
config.opt_idx = opt_idx
break
else:
raise MisconfigurationException(
"Some schedulers are attached with an optimizer that wasn't returned from `configure_optimizers`."
)
def _validate_optim_conf(optim_conf: Dict[str, Any]) -> None:
valid_keys = {"optimizer", "lr_scheduler", "frequency", "monitor"}
extra_keys = optim_conf.keys() - valid_keys
if extra_keys:
rank_zero_warn(
f"Found unsupported keys in the optimizer configuration: {set(extra_keys)}", category=RuntimeWarning
)
class _MockOptimizer(Optimizer):
"""The `_MockOptimizer` will be used inplace of an optimizer in the event that `None` is returned from
`configure_optimizers`."""
def __init__(self) -> None:
super().__init__([torch.zeros(1)], {})
def add_param_group(self, param_group: Dict[Any, Any]) -> None:
pass # Do Nothing
def load_state_dict(self, state_dict: Dict[Any, Any]) -> None:
pass # Do Nothing
def state_dict(self) -> Dict[str, Any]:
return {} # Return Empty
def step(self, closure: Callable = None) -> None:
if closure is not None:
closure()
def zero_grad(self, set_to_none: Optional[bool] = False) -> None:
pass # Do Nothing
def __repr__(self) -> str:
return "No Optimizer"
| [
"torch.zeros"
] | 1.7 | 314dev/pi | 0e65adf329b00b0d0f73346734df2c06eec05ddd |
1.2 | import os
import argparse
import logging
import torch.nn.functional as F
import numpy as np
import pymia.data.assembler as assembler
import SimpleITK as sitk
import common.trainloop.data as data
import common.evalutation.eval as ev
import common.trainloop.steps as step
import common.trainloop.context as ctx
import common.trainloop.hooks as hooks
import common.trainloop.loops as loop
import common.utils.messages as msg
import common.utils.threadhelper as thread
import common.utils.labelhelper as lh
import rechun.dl.customdatasets as isic
import rechun.directories as dirs
def main(config_file):
if config_file is None:
config_file = os.path.join(dirs.CONFIG_DIR, 'test_isic_aleatoric.yaml')
context = ctx.TorchTestContext('cuda')
context.load_from_config(config_file)
build_test = data.BuildData(
build_dataset=isic.BuildIsicDataset(),
)
if not hasattr(context.config.others, 'is_log_sigma'):
raise ValueError('"is_log_sigma" entry missing in configuration file')
is_log_sigma = context.config.others.is_log_sigma
test_steps = [AleatoricPredictStep(is_log_sigma), PrepareSubjectStep()]
subject_steps = [EvalSubjectStep()]
subject_assembler = assembler.Subject2dAssembler()
test = loop.Test(test_steps, subject_steps, subject_assembler)
hook = hooks.ReducedComposeTestLoopHook([hooks.ConsoleTestLogHook(),
hooks.WriteTestMetricsCsvHook('metrics.csv'),
WriteHook()
])
test(context, build_test, hook=hook)
class AleatoricPredictStep(step.BatchStep):
def __init__(self, is_log_sigma=False) -> None:
super().__init__()
self.is_log_sigma = is_log_sigma
def __call__(self, batch_context: ctx.BatchContext, task_context: ctx.TaskContext, context: ctx.Context) -> None:
if not isinstance(context, ctx.TorchTestContext):
raise ValueError(msg.get_type_error_msg(context, ctx.TorchTestContext))
batch_context.input['images'] = batch_context.input['images'].float().to(context.device)
mean_logits, sigma = context.model(batch_context.input['images'])
batch_context.output['logits'] = mean_logits
if self.is_log_sigma:
sigma = sigma.exp()
else:
sigma = sigma.abs()
batch_context.output['sigma'] = sigma
probabilities = F.softmax(batch_context.output['logits'], 1)
batch_context.output['probabilities'] = probabilities
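# Editorial note (not in the original file): the wrapped model is expected to
# return a pair (mean_logits, sigma) for each input. When is_log_sigma is set,
# the network predicts log(sigma), so it is exponentiated above to recover a
# positive scale; otherwise the absolute value is taken. Only the softmax of
# the mean logits is stored as 'probabilities' for assembly and evaluation.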
class EvalSubjectStep(step.SubjectStep):
def __init__(self) -> None:
super().__init__()
self.evaluate = ev.ComposeEvaluation([ev.DiceNumpy()])
def __call__(self, subject_context: ctx.SubjectContext, task_context: ctx.TaskContext, context: ctx.Context) -> None:
probabilities = subject_context.subject_data['probabilities']
prediction = np.argmax(probabilities, axis=-1)
to_eval = {'prediction': prediction, 'probabilities': probabilities,
'target': subject_context.subject_data['labels'].squeeze(-1)}
results = {}
self.evaluate(to_eval, results)
subject_context.metrics.update(results)
class PrepareSubjectStep(step.BatchStep):
def __call__(self, batch_context: ctx.BatchContext, task_context: ctx.TaskContext, context: ctx.Context) -> None:
batch_context.output['labels'] = batch_context.input['labels'].unsqueeze(1) # re-add previously removed dim
class WriteHook(hooks.TestLoopHook):
def __del__(self):
thread.join_all()
print('joined....')
def on_test_subject_end(self, subject_context: ctx.SubjectContext, task_context: ctx.TaskContext,
context: ctx.TestContext):
if not isinstance(context, ctx.TorchTestContext):
raise ValueError(msg.get_type_error_msg(context, ctx.TorchTestContext))
thread.do_work(WriteHook._on_test_subject_end,
subject_context, task_context, context, in_background=True)
@staticmethod
def _on_test_subject_end(subject_context: ctx.SubjectContext, task_context: ctx.TaskContext,
context: ctx.TorchTestContext):
probabilities = subject_context.subject_data['probabilities']
predictions = np.argmax(probabilities, axis=-1).astype(np.uint8)
sigma = subject_context.subject_data['sigma']
prediction = np.argmax(probabilities, axis=-1)
sigma = sigma[lh.to_one_hot(prediction).astype(np.bool)].reshape(prediction.shape)
probabilities = probabilities[..., 1] # foreground class
id_ = subject_context.subject_index
probability_img = sitk.GetImageFromArray(probabilities)
prediction_img = sitk.GetImageFromArray(predictions)
sigma_img = sitk.GetImageFromArray(sigma)
sitk.WriteImage(probability_img, os.path.join(context.test_dir, '{}_probabilities.nii.gz'.format(id_)))
sitk.WriteImage(prediction_img, os.path.join(context.test_dir, '{}_prediction.nii.gz'.format(id_)))
sitk.WriteImage(sigma_img, os.path.join(context.test_dir, '{}_sigma.nii.gz'.format(id_)))
files = context.test_data.dataset.get_files_by_id(id_)
label_path = os.path.abspath(files['label_paths'])
label_out_path = os.path.join(context.test_dir, os.path.basename(label_path))
os.symlink(label_path, label_out_path)
image_path = os.path.abspath(files['image_paths'])
image_out_path = os.path.join(context.test_dir, os.path.basename(image_path))
os.symlink(image_path, image_out_path)
if __name__ == '__main__':
try:
parser = argparse.ArgumentParser(description='ISIC test script (aleatoric)')
        parser.add_argument('-config_file', type=str, help='the configuration file (yaml) for the test run')
args = parser.parse_args()
main(args.config_file)
finally:
logging.exception('') # log the exception
| [
"torch.nn.functional.softmax"
] | 1.2.0 | alainjungo/reliability-challenges-uncertainty | 21e86f6e2a5d2520b5767dce48bbcf2b11773788 |
1.4 | from torch.utils.data import DataLoader
from sequence.data.datasets import brown
from sequence.data.utils import DatasetInference, Tokens, Language
from sequence.test import language, words, dataset, paths
import pytest
import numpy as np
import random
def test_dataset_torch_compatible(dataset):
dl = DataLoader(dataset)
# assert runs
next(iter(dl))
dl = DataLoader(dataset, shuffle=True)
next(iter(dl))
def test_brown_dataset():
ds, lang = brown()
assert lang[234] == "found"
# Check if punctuation is removed.
with pytest.raises(ValueError):
lang.words.index(".")
ds.get_batch(0, 10)
def test_dataset_split(dataset):
dataset.max_len = 10
dataset.min_len = 5
ds_train, ds_test = dataset.split([0.8, 0.2])
assert ds_train.min_len == dataset.min_len
assert ds_test.max_len == dataset.max_len
assert ds_test.data.shape == (400, 9)
assert ds_train.data.shape == (1600, 9)
def test_transition_matrix(dataset):
mm = dataset.transition_matrix
assert mm[0, 1] == 0
def test_inference_dset(paths, dataset):
language = dataset.language
# Shuffle paths so that the dataset cannot create the same data.
np.random.seed(1)
np.random.shuffle(paths)
new_paths = []
for p in paths:
new = list(p)
new.append(random.choice("ZYZWVUT"))
new_paths.append(new)
inference_ds = DatasetInference(sentences=new_paths, language=language)
# Assert that the new words are assigned to the UNKNOWN field.
assert (inference_ds.data.compute() == Tokens.UNKNOWN.value).sum() > 0
assert (dataset.data.compute() == Tokens.UNKNOWN.value).sum() == 0
print(inference_ds.data)
assert len(inference_ds) == len(new_paths)
def test_custom_emb():
emb = np.random.rand(3, 5)
lang = Language(custom_embeddings=emb)
# check if eos, sos and unknown are inserted
assert lang.custom_embeddings.shape == (6, 5)
| [
"torch.utils.data.DataLoader"
] | 1.4.0 | ritchie46/clickstream | 79c669d0636521db2697e5fa583628d1920cc6c1 |
1.9 | #! /usr/bin/env python
# coding=utf-8
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import logging
import numpy as np
import torch
from ludwig.constants import *
from ludwig.decoders.generic_decoders import Classifier
from ludwig.encoders.set_encoders import ENCODER_REGISTRY
from ludwig.features.base_feature import InputFeature
from ludwig.features.base_feature import OutputFeature
from ludwig.features.feature_utils import set_str_to_idx
from ludwig.modules.loss_modules import SigmoidCrossEntropyLoss
from ludwig.modules.metric_modules import JaccardMetric
from ludwig.modules.metric_modules import SigmoidCrossEntropyMetric
from ludwig.utils import output_feature_utils
from ludwig.utils.misc_utils import set_default_value
from ludwig.utils.strings_utils import create_vocabulary, tokenizer_registry,\
UNKNOWN_SYMBOL
logger = logging.getLogger(__name__)
class SetFeatureMixin:
type = SET
preprocessing_defaults = {
'tokenizer': 'space',
'most_common': 10000,
'lowercase': False,
'missing_value_strategy': FILL_WITH_CONST,
'fill_value': UNKNOWN_SYMBOL
}
preprocessing_schema = {
'tokenizer': {'type': 'string', 'enum': sorted(list(tokenizer_registry.keys()))},
'most_common': {'type': 'integer', 'minimum': 0},
'lowercase': {'type': 'boolean'},
'missing_value_strategy': {'type': 'string', 'enum': MISSING_VALUE_STRATEGY_OPTIONS},
'fill_value': {'type': 'string'},
'computed_fill_value': {'type': 'string'},
}
@staticmethod
def cast_column(column, backend):
return column
@staticmethod
def get_feature_meta(column, preprocessing_parameters, backend):
column = column.astype(str)
idx2str, str2idx, str2freq, max_size, _, _, _ = create_vocabulary(
column,
preprocessing_parameters['tokenizer'],
num_most_frequent=preprocessing_parameters['most_common'],
lowercase=preprocessing_parameters['lowercase'],
processor=backend.df_engine
)
return {
'idx2str': idx2str,
'str2idx': str2idx,
'str2freq': str2freq,
'vocab_size': len(str2idx),
'max_set_size': max_size
}
@staticmethod
def feature_data(column, metadata, preprocessing_parameters, backend):
def to_dense(x):
feature_vector = set_str_to_idx(
x,
metadata['str2idx'],
preprocessing_parameters['tokenizer']
)
set_vector = np.zeros((len(metadata['str2idx']),))
set_vector[feature_vector] = 1
return set_vector.astype(np.bool)
return backend.df_engine.map_objects(column, to_dense)
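    # Hedged illustration (editorial, not in the original file): with a vocabulary
    # such as str2idx = {"<UNK>": 0, "blue": 1, "green": 2, "red": 3} and the raw
    # value "red green", to_dense would (roughly) produce the multi-hot boolean
    # vector [False, False, True, True], one slot per vocabulary entry, True for
    # every token present in the set.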
@staticmethod
def add_feature_data(
feature,
input_df,
proc_df,
metadata,
preprocessing_parameters,
backend,
skip_save_processed_input
):
proc_df[feature[PROC_COLUMN]] = SetFeatureMixin.feature_data(
input_df[feature[COLUMN]].astype(str),
metadata[feature[NAME]],
preprocessing_parameters,
backend
)
return proc_df
class SetInputFeature(SetFeatureMixin, InputFeature):
encoder = 'embed'
vocab = []
def __init__(self, feature, encoder_obj=None):
super().__init__(feature)
self.overwrite_defaults(feature)
if encoder_obj:
self.encoder_obj = encoder_obj
else:
self.encoder_obj = self.initialize_encoder(feature)
def forward(self, inputs):
assert isinstance(inputs, torch.Tensor)
assert inputs.dtype in [torch.bool, torch.int64]
encoder_output = self.encoder_obj(inputs)
return {'encoder_output': encoder_output}
@property
def input_dtype(self):
return torch.bool
@property
def input_shape(self) -> torch.Size:
return torch.Size([len(self.vocab)])
@staticmethod
def update_config_with_metadata(
input_feature,
feature_metadata,
*args,
**kwargs
):
input_feature['vocab'] = feature_metadata['idx2str']
@staticmethod
def populate_defaults(input_feature):
set_default_value(input_feature, TIED, None)
encoder_registry = ENCODER_REGISTRY
@property
def output_shape(self) -> torch.Size:
return self.encoder_obj.output_shape
class SetOutputFeature(SetFeatureMixin, OutputFeature):
decoder = 'classifier'
num_classes = 0
loss = {TYPE: SIGMOID_CROSS_ENTROPY}
metric_functions = {LOSS: None, JACCARD: None}
default_validation_metric = JACCARD
def __init__(self, feature):
super().__init__(feature)
self.num_classes = 0
self.threshold = 0.5
self.overwrite_defaults(feature)
self.decoder_obj = self.initialize_decoder(feature)
self._setup_loss()
self._setup_metrics()
def logits(
self,
inputs, # hidden
**kwargs
):
hidden = inputs[HIDDEN]
return self.decoder_obj(hidden)
def predictions(
self,
inputs,
feature_name,
**kwargs
):
logits = output_feature_utils.get_output_feature_tensor(
inputs, feature_name, LOGITS)
probabilities = torch.sigmoid(logits)
predictions = torch.greater_equal(probabilities, self.threshold)
predictions = predictions.type(torch.int64)
return {
PREDICTIONS: predictions,
PROBABILITIES: probabilities,
LOGITS: logits
}
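    # Hedged illustration (editorial, not in the original file): with the default
    # threshold of 0.5, logits [1.4, -0.8, 0.4] give probabilities of roughly
    # [0.80, 0.31, 0.60] after the sigmoid, and predictions [1, 0, 1] after the
    # greater_equal comparison and cast to int64.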
def _setup_loss(self):
self.train_loss_function = SigmoidCrossEntropyLoss(**self.loss)
self.eval_loss_function = SigmoidCrossEntropyMetric(**self.loss)
def _setup_metrics(self):
self.metric_functions = {} # needed to shadow class variable
self.metric_functions[LOSS] = self.eval_loss_function
self.metric_functions[JACCARD] = JaccardMetric()
def get_prediction_set(self):
return {
PREDICTIONS, PROBABILITIES, LOGITS
}
@classmethod
def get_output_dtype(cls):
return torch.bool
@property
def input_shape(self) -> torch.Size:
return self.decoder_obj.input_shape
@property
def output_shape(self) -> torch.Size:
return torch.Size([self.num_classes])
@staticmethod
def update_config_with_metadata(
output_feature,
feature_metadata,
*args,
**kwargs
):
output_feature[LOSS][TYPE] = None
output_feature['num_classes'] = feature_metadata['vocab_size']
if isinstance(output_feature[LOSS]['class_weights'], (list, tuple)):
if (len(output_feature[LOSS]['class_weights']) !=
output_feature['num_classes']):
raise ValueError(
'The length of class_weights ({}) is not compatible with '
'the number of classes ({}) for feature {}. '
'Check the metadata JSON file to see the classes '
'and their order and consider there needs to be a weight '
'for the <UNK> and <PAD> class too.'.format(
len(output_feature[LOSS]['class_weights']),
output_feature['num_classes'],
output_feature[NAME]
)
)
if isinstance(output_feature[LOSS]['class_weights'], dict):
if (
feature_metadata['str2idx'].keys() !=
output_feature[LOSS]['class_weights'].keys()
):
raise ValueError(
'The class_weights keys ({}) are not compatible with '
'the classes ({}) of feature {}. '
'Check the metadata JSON file to see the classes '
'and consider there needs to be a weight '
'for the <UNK> and <PAD> class too.'.format(
output_feature[LOSS]['class_weights'].keys(),
feature_metadata['str2idx'].keys(),
output_feature[NAME]
)
)
else:
class_weights = output_feature[LOSS]['class_weights']
idx2str = feature_metadata['idx2str']
class_weights_list = [class_weights[s] for s in idx2str]
output_feature[LOSS]['class_weights'] = class_weights_list
@staticmethod
def calculate_overall_stats(
predictions,
targets,
train_set_metadata
):
# no overall stats, just return empty dictionary
return {}
def postprocess_predictions(
self,
result,
metadata,
output_directory,
backend,
):
predictions_col = f'{self.feature_name}_{PREDICTIONS}'
if predictions_col in result:
def idx2str(pred_set):
return [
metadata['idx2str'][i]
for i, pred in enumerate(pred_set)
if pred
]
result[predictions_col] = backend.df_engine.map_objects(
result[predictions_col],
idx2str,
)
probabilities_col = f'{self.feature_name}_{PROBABILITIES}'
prob_col = f'{self.feature_name}_{PROBABILITY}'
if probabilities_col in result:
threshold = self.threshold
def get_prob(prob_set):
return [
prob for prob in prob_set if
prob >= threshold
]
result[prob_col] = backend.df_engine.map_objects(
result[probabilities_col],
get_prob,
)
return result
@staticmethod
def populate_defaults(output_feature):
set_default_value(output_feature, LOSS,
{TYPE: SIGMOID_CROSS_ENTROPY, 'weight': 1})
set_default_value(output_feature[LOSS], 'weight', 1)
set_default_value(output_feature[LOSS], 'class_weights', None)
set_default_value(output_feature, 'threshold', 0.5)
set_default_value(output_feature, 'dependencies', [])
set_default_value(output_feature, 'reduce_input', SUM)
set_default_value(output_feature, 'reduce_dependencies', SUM)
decoder_registry = {
'classifier': Classifier,
'null': Classifier,
'none': Classifier,
'None': Classifier,
None: Classifier
}
| [
"torch.greater_equal",
"torch.sigmoid",
"torch.Size"
] | 1.9.0 | carlogrisetti/ludwig | 5c0887f14867e1577e0ddc3806c5cf7a781fb665 |
1.6 | import torch
import numpy as np
import random
from transformers import T5Tokenizer, T5ForConditionalGeneration
#Set all seeds to make output deterministic
torch.manual_seed(0)
np.random.seed(0)
random.seed(0)
#Paragraphs for which we want to generate queries
paragraphs = [
"Python is an interpreted, high-level and general-purpose programming language. Python's design philosophy emphasizes code readability with its notable use of significant whitespace. Its language constructs and object-oriented approach aim to help programmers write clear, logical code for small and large-scale projects.",
"Python is dynamically-typed and garbage-collected. It supports multiple programming paradigms, including structured (particularly, procedural), object-oriented and functional programming. Python is often described as a \"batteries included\" language due to its comprehensive standard library.",
"Python was created in the late 1980s, and first released in 1991, by Guido van Rossum as a successor to the ABC programming language. Python 2.0, released in 2000, introduced new features, such as list comprehensions, and a garbage collection system with reference counting, and was discontinued with version 2.7 in 2020. Python 3.0, released in 2008, was a major revision of the language that is not completely backward-compatible and much Python 2 code does not run unmodified on Python 3. With Python 2's end-of-life (and pip having dropped support in 2021), only Python 3.6.x and later are supported, with older versions still supporting e.g. Windows 7 (and old installers not restricted to 64-bit Windows).",
"Python interpreters are supported for mainstream operating systems and available for a few more (and in the past supported many more). A global community of programmers develops and maintains CPython, a free and open-source reference implementation. A non-profit organization, the Python Software Foundation, manages and directs resources for Python and CPython development.",
"As of January 2021, Python ranks third in TIOBE’s index of most popular programming languages, behind C and Java, having previously gained second place and their award for the most popularity gain for 2020.",
"Java is a class-based, object-oriented programming language that is designed to have as few implementation dependencies as possible. It is a general-purpose programming language intended to let application developers write once, run anywhere (WORA), meaning that compiled Java code can run on all platforms that support Java without the need for recompilation. Java applications are typically compiled to bytecode that can run on any Java virtual machine (JVM) regardless of the underlying computer architecture. The syntax of Java is similar to C and C++, but has fewer low-level facilities than either of them. The Java runtime provides dynamic capabilities (such as reflection and runtime code modification) that are typically not available in traditional compiled languages. As of 2019, Java was one of the most popular programming languages in use according to GitHub, particularly for client-server web applications, with a reported 9 million developers.",
"Java was originally developed by James Gosling at Sun Microsystems (which has since been acquired by Oracle) and released in 1995 as a core component of Sun Microsystems' Java platform. The original and reference implementation Java compilers, virtual machines, and class libraries were originally released by Sun under proprietary licenses. As of May 2007, in compliance with the specifications of the Java Community Process, Sun had relicensed most of its Java technologies under the GNU General Public License. Oracle offers its own HotSpot Java Virtual Machine, however the official reference implementation is the OpenJDK JVM which is free open source software and used by most developers and is the default JVM for almost all Linux distributions.",
"As of September 2020, the latest version is Java 15, with Java 11, a currently supported long-term support (LTS) version, released on September 25, 2018. Oracle released the last zero-cost public update for the legacy version Java 8 LTS in January 2019 for commercial use, although it will otherwise still support Java 8 with public updates for personal use indefinitely. Other vendors have begun to offer zero-cost builds of OpenJDK 8 and 11 that are still receiving security and other upgrades.",
"Oracle (and others) highly recommend uninstalling outdated versions of Java because of serious risks due to unresolved security issues. Since Java 9, 10, 12, 13, and 14 are no longer supported, Oracle advises its users to immediately transition to the latest version (currently Java 15) or an LTS release."
]
# For available models for query generation, see: https://huggingface.co/BeIR/
# Here, we use a T5-large model that was trained on the MS MARCO dataset
tokenizer = T5Tokenizer.from_pretrained('BeIR/query-gen-msmarco-t5-large-v1')
model = T5ForConditionalGeneration.from_pretrained('BeIR/query-gen-msmarco-t5-large-v1')
model.eval()
#Select the device
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(device)
#Iterate over the paragraphs and generate for each some queries
with torch.no_grad():
for para in paragraphs:
input_ids = tokenizer.encode(para, return_tensors='pt').to(device)
outputs = model.generate(
input_ids=input_ids,
max_length=64,
do_sample=True,
top_p=0.95,
num_return_sequences=3)
print("\nParagraph:")
print(para)
print("\nGenerated Queries:")
for i in range(len(outputs)):
query = tokenizer.decode(outputs[i], skip_special_tokens=True)
print(f'{i + 1}: {query}')
"""
Output of the script:
Paragraph:
Python is an interpreted, high-level and general-purpose programming language. Python's design philosophy emphasizes code readability with its notable use of significant whitespace. Its language constructs and object-oriented approach aim to help programmers write clear, logical code for small and large-scale projects.
Generated Queries:
1: what is python language used for
2: what is python programming
3: what language do i use for scripts
Paragraph:
Python is dynamically-typed and garbage-collected. It supports multiple programming paradigms, including structured (particularly, procedural), object-oriented and functional programming. Python is often described as a "batteries included" language due to its comprehensive standard library.
Generated Queries:
1: what is python language
2: what programming paradigms do python support
3: what programming languages use python
Paragraph:
Python was created in the late 1980s, and first released in 1991, by Guido van Rossum as a successor to the ABC programming language. Python 2.0, released in 2000, introduced new features, such as list comprehensions, and a garbage collection system with reference counting, and was discontinued with version 2.7 in 2020. Python 3.0, released in 2008, was a major revision of the language that is not completely backward-compatible and much Python 2 code does not run unmodified on Python 3. With Python 2's end-of-life (and pip having dropped support in 2021), only Python 3.6.x and later are supported, with older versions still supporting e.g. Windows 7 (and old installers not restricted to 64-bit Windows).
Generated Queries:
1: what year did python start
2: when does the next python update release
3: when did python come out?
Paragraph:
Python interpreters are supported for mainstream operating systems and available for a few more (and in the past supported many more). A global community of programmers develops and maintains CPython, a free and open-source reference implementation. A non-profit organization, the Python Software Foundation, manages and directs resources for Python and CPython development.
Generated Queries:
1: what platform is python available on
2: what is python used for
3: what is python?
Paragraph:
As of January 2021, Python ranks third in TIOBE’s index of most popular programming languages, behind C and Java, having previously gained second place and their award for the most popularity gain for 2020.
Generated Queries:
1: what is the most used programming language in the world
2: what is python language
3: what is the most popular programming language in the world?
Paragraph:
Java is a class-based, object-oriented programming language that is designed to have as few implementation dependencies as possible. It is a general-purpose programming language intended to let application developers write once, run anywhere (WORA), meaning that compiled Java code can run on all platforms that support Java without the need for recompilation. Java applications are typically compiled to bytecode that can run on any Java virtual machine (JVM) regardless of the underlying computer architecture. The syntax of Java is similar to C and C++, but has fewer low-level facilities than either of them. The Java runtime provides dynamic capabilities (such as reflection and runtime code modification) that are typically not available in traditional compiled languages. As of 2019, Java was one of the most popular programming languages in use according to GitHub, particularly for client-server web applications, with a reported 9 million developers.
Generated Queries:
1: java how java works
2: what language is similar to java
3: what is java language
Paragraph:
Java was originally developed by James Gosling at Sun Microsystems (which has since been acquired by Oracle) and released in 1995 as a core component of Sun Microsystems' Java platform. The original and reference implementation Java compilers, virtual machines, and class libraries were originally released by Sun under proprietary licenses. As of May 2007, in compliance with the specifications of the Java Community Process, Sun had relicensed most of its Java technologies under the GNU General Public License. Oracle offers its own HotSpot Java Virtual Machine, however the official reference implementation is the OpenJDK JVM which is free open source software and used by most developers and is the default JVM for almost all Linux distributions.
Generated Queries:
1: what is java created by
2: when was java introduced to linux
3: who developed java?
Paragraph:
As of September 2020, the latest version is Java 15, with Java 11, a currently supported long-term support (LTS) version, released on September 25, 2018. Oracle released the last zero-cost public update for the legacy version Java 8 LTS in January 2019 for commercial use, although it will otherwise still support Java 8 with public updates for personal use indefinitely. Other vendors have begun to offer zero-cost builds of OpenJDK 8 and 11 that are still receiving security and other upgrades.
Generated Queries:
1: what is the latest version of java
2: what is the latest java version
3: what is the latest version of java
Paragraph:
Oracle (and others) highly recommend uninstalling outdated versions of Java because of serious risks due to unresolved security issues. Since Java 9, 10, 12, 13, and 14 are no longer supported, Oracle advises its users to immediately transition to the latest version (currently Java 15) or an LTS release.
Generated Queries:
1: why is oracle not supported
2: what version is oracle used in
3: which java version is obsolete
"""
| [
"torch.manual_seed",
"torch.no_grad",
"torch.cuda.is_available"
] | 1.6.0 | faezakamran/sentence-transformers | 2158fff3aa96651b10fe367c41fdd5008a33c5c6 |
1.0 | import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from torch.autograd.variable import Variable
import fast_self_multihead_attn_norm_add
class FastSelfAttnNormAddFunc(torch.autograd.Function) :
@staticmethod
def forward(ctx, use_time_mask, is_training, heads, inputs, lyr_nrm_gamma_weights, lyr_nrm_beta_weights, input_weights, output_weights, pad_mask, dropout_prob) :
heads_t = Variable(torch.tensor([heads]))
dropout_prob_t = Variable(torch.tensor([dropout_prob]))
null_tensor = torch.tensor([])
use_mask = (pad_mask is not None)
lyr_nrm_results, \
lyr_nrm_mean, \
lyr_nrm_invvar, \
input_lin_results, \
softmax_results, \
dropout_results, \
dropout_mask, \
matmul2_results, \
dropout_add_mask, \
outputs = \
fast_self_multihead_attn_norm_add.forward( \
use_mask, \
use_time_mask, \
is_training, \
heads, \
inputs, \
lyr_nrm_gamma_weights, \
lyr_nrm_beta_weights, \
input_weights, \
output_weights, \
pad_mask if use_mask else null_tensor, \
dropout_prob)
ctx.save_for_backward(heads_t, \
matmul2_results, \
dropout_results, \
softmax_results, \
input_lin_results, \
lyr_nrm_results, \
lyr_nrm_mean, \
lyr_nrm_invvar, \
inputs, \
lyr_nrm_gamma_weights, \
lyr_nrm_beta_weights, \
input_weights, \
output_weights, \
dropout_mask, \
dropout_add_mask, \
dropout_prob_t)
return outputs.detach()
@staticmethod
def backward(ctx, output_grads) :
heads_t, \
matmul2_results, \
dropout_results, \
softmax_results, \
input_lin_results, \
lyr_nrm_results, \
lyr_nrm_mean, \
lyr_nrm_invvar, \
inputs, \
lyr_nrm_gamma_weights, \
lyr_nrm_beta_weights, \
input_weights, \
output_weights, \
dropout_mask, \
dropout_add_mask, \
dropout_prob_t = ctx.saved_tensors
input_grads, \
lyr_nrm_gamma_grads, \
lyr_nrm_beta_grads, \
input_weight_grads, \
output_weight_grads = \
fast_self_multihead_attn_norm_add.backward( \
heads_t[0], \
output_grads, \
matmul2_results, \
dropout_results, \
softmax_results, \
input_lin_results, \
lyr_nrm_results, \
lyr_nrm_mean, \
lyr_nrm_invvar, \
inputs, \
lyr_nrm_gamma_weights, \
lyr_nrm_beta_weights, \
input_weights, \
output_weights, \
dropout_mask, \
dropout_add_mask, \
dropout_prob_t[0])
return None, None, None, \
input_grads, \
lyr_nrm_gamma_grads, \
lyr_nrm_beta_grads, \
input_weight_grads, \
output_weight_grads, \
None, None
fast_self_attn_norm_add_func = FastSelfAttnNormAddFunc.apply
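# Hedged usage note (editorial, not part of the original file): the autograd
# function above fuses layer norm + self-attention + dropout + residual add and
# requires the compiled fast_self_multihead_attn_norm_add CUDA extension. A call
# would look roughly like the sketch below; the shapes are assumptions following
# the usual apex convention of inputs shaped (seq_len, batch, embed_dim), a fused
# Q/K/V projection of shape (3*embed_dim, embed_dim) and an output projection of
# shape (embed_dim, embed_dim).
#
#   outputs = fast_self_attn_norm_add_func(
#       False,            # use_time_mask
#       True,             # is_training
#       heads,
#       inputs,           # (seq_len, batch, embed_dim) CUDA tensor
#       lyr_nrm_gamma,    # (embed_dim,)
#       lyr_nrm_beta,     # (embed_dim,)
#       input_weights,    # (3*embed_dim, embed_dim)
#       output_weights,   # (embed_dim, embed_dim)
#       None,             # pad_mask
#       0.1)              # dropout_prob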
| [
"torch.tensor"
] | 1.0 | DonnieKim411/apex | fb00a5a1d569c7b118aa672b3dacac3663ca3911 |
1.2 | #!/usr/bin/env python3
from collections import namedtuple
from typing import Callable, Iterable, List, NamedTuple, Optional, Tuple, Union
import torch
from captum.attr import IntegratedGradients
from captum.attr._utils.batching import _batched_generator
from captum.attr._utils.common import _run_forward, safe_div
from captum.insights.features import BaseFeature
from torch import Tensor
from torch.nn import Module
OutputScore = namedtuple("OutputScore", "score index label")
VisualizationOutput = namedtuple(
"VisualizationOutput", "feature_outputs actual predicted active_index"
)
Contribution = namedtuple("Contribution", "name percent")
SampleCache = namedtuple("SampleCache", "inputs additional_forward_args label")
class FilterConfig(NamedTuple):
steps: int = 20
prediction: str = "all"
classes: List[str] = []
count: int = 4
class Batch:
def __init__(
self,
inputs: Union[Tensor, Tuple[Tensor, ...]],
labels: Optional[Tensor],
additional_args=None,
):
r"""
Constructs batch of inputs to be attributed and visualized.
Args:
inputs (tensor or tuple of tensors): Batch of inputs for a model.
These may be either a Tensor or tuple of tensors. Each tensor
must correspond to a feature for AttributionVisualizer, and
the corresponding input transform function of the feature
is applied to each input tensor prior to passing it to the
model. It is assumed that the first dimension of each
input tensor corresponds to the number of examples
(batch size) and is aligned for all input tensors.
labels (tensor): Tensor containing correct labels for input examples.
This must be a 1D tensor with length matching the first
dimension of each input tensor.
additional_args (tuple, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
For a tensor, the first dimension of the tensor must
correspond to the number of examples.
"""
self.inputs = inputs
self.labels = labels
self.additional_args = additional_args
class AttributionVisualizer(object):
def __init__(
self,
models: Union[List[Module], Module],
classes: List[str],
features: Union[List[BaseFeature], BaseFeature],
dataset: Iterable[Batch],
score_func: Optional[Callable] = None,
use_label_for_attr: bool = True,
):
r"""
Args:
models (torch.nn.module): PyTorch module (model) for attribution
visualization.
We plan to support visualizing and comparing multiple models
in the future, but currently this supports only a single
model.
classes (list of string): List of strings corresponding to the names of
classes for classification.
features (list of BaseFeature): List of BaseFeatures, which correspond
to input arguments to the model. Each feature object defines
relevant transformations for converting to model input,
constructing baselines, and visualizing. The length of the
features list should exactly match the number of (tensor)
arguments expected by the given model.
For instance, an image classifier should only provide
a single BaseFeature, while a multimodal classifier may
provide a list of features, each corresponding to a different
tensor input and potentially different modalities.
dataset (iterable of Batch): Defines the dataset to visualize attributions
for. This must be an iterable of batch objects, each of which
may contain multiple input examples.
score_func (callable, optional): This function is applied to the model
output to obtain the score for each class. For instance,
this function could be the softmax or final non-linearity
of the network, applied to the model output. The indices
of the second dimension of the output should correspond
to the class names provided. If None, the model outputs
are taken directly and assumed to correspond to the
class scores.
Default: None
use_label_for_attr (boolean, optional): If true, the class index is passed
to the relevant attribution method. This is necessary in most
cases where there is an output neuron corresponding to each
class. When the model output is a scalar and class index
(e.g. positive, negative) is inferred from the output value,
this argument should be False.
Default: True
"""
if not isinstance(models, List):
models = [models]
if not isinstance(features, List):
features = [features]
self.models = models
self.classes = classes
self.features = features
self.dataset = dataset
self.score_func = score_func
self._outputs = []
self._config = FilterConfig(steps=25, prediction="all", classes=[], count=4)
self._use_label_for_attr = use_label_for_attr
def _calculate_attribution_from_cache(
self, index: int, target: Optional[Tensor]
) -> VisualizationOutput:
c = self._outputs[index][1]
return self._calculate_vis_output(
c.inputs, c.additional_forward_args, c.label, torch.tensor(target)
)
def _calculate_attribution(
self,
net: Module,
baselines: Optional[List[Tuple[Tensor, ...]]],
data: Tuple[Tensor, ...],
additional_forward_args: Optional[Tuple[Tensor, ...]],
label: Optional[Union[Tensor]],
) -> Tensor:
ig = IntegratedGradients(net)
# TODO support multiple baselines
baseline = baselines[0] if len(baselines) > 0 else None
label = (
None
if not self._use_label_for_attr or label is None or label.nelement() == 0
else label
)
attr_ig = ig.attribute(
data,
baselines=baseline,
additional_forward_args=additional_forward_args,
target=label,
n_steps=self._config.steps,
)
return attr_ig
def _update_config(self, settings):
self._config = FilterConfig(
steps=int(settings["approximation_steps"]),
prediction=settings["prediction"],
classes=settings["classes"],
count=4,
)
def render(self):
from IPython.display import display
from captum.insights.widget import CaptumInsights
widget = CaptumInsights(visualizer=self)
display(widget)
def serve(self, blocking=False, debug=False, port=None):
from captum.insights.server import start_server
start_server(self, blocking=blocking, debug=debug, _port=port)
def _get_labels_from_scores(
self, scores: Tensor, indices: Tensor
) -> List[OutputScore]:
pred_scores = []
for i in range(len(indices)):
score = scores[i]
pred_scores.append(OutputScore(score, indices[i], self.classes[indices[i]]))
return pred_scores
def _transform(
self,
transforms: Union[Callable, List[Callable]],
inputs: Tensor,
batch: bool = False,
) -> Tensor:
transformed_inputs = inputs
# TODO support batch size > 1
if batch:
transformed_inputs = inputs.squeeze()
if isinstance(transforms, List):
for t in transforms:
transformed_inputs = t(transformed_inputs)
else:
transformed_inputs = transforms(transformed_inputs)
if batch:
transformed_inputs = transformed_inputs.unsqueeze(0)
return transformed_inputs
def _calculate_net_contrib(self, attrs_per_input_feature: List[Tensor]):
# get the net contribution per feature (input)
net_contrib = torch.stack(
[attrib.flatten().sum() for attrib in attrs_per_input_feature]
)
# normalise the contribution, s.t. sum(abs(x_i)) = 1
norm = torch.norm(net_contrib, p=1)
net_contrib = safe_div(net_contrib, norm, default_value=net_contrib)
return net_contrib.tolist()
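    # Worked example (editorial, not in the original file): if the per-feature
    # attribution sums are [2.0, -1.0, 1.0], the L1 norm is 4.0 and the normalised
    # net contributions returned above are [0.5, -0.25, 0.25], so the absolute
    # values sum to 1.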
def _predictions_matches_labels(
self, predicted_scores: List[OutputScore], labels: Union[str, List[str]]
) -> bool:
if len(predicted_scores) == 0:
return False
predicted_label = predicted_scores[0].label
if isinstance(labels, List):
return predicted_label in labels
return labels == predicted_label
def _should_keep_prediction(
self, predicted_scores: List[OutputScore], actual_label: OutputScore
) -> bool:
# filter by class
if len(self._config.classes) != 0:
if not self._predictions_matches_labels(
predicted_scores, self._config.classes
):
return False
# filter by accuracy
label_name = actual_label.label
if self._config.prediction == "all":
pass
elif self._config.prediction == "correct":
if not self._predictions_matches_labels(predicted_scores, label_name):
return False
elif self._config.prediction == "incorrect":
if self._predictions_matches_labels(predicted_scores, label_name):
return False
else:
raise Exception(f"Invalid prediction config: {self._config.prediction}")
return True
def _calculate_vis_output(
self, inputs, additional_forward_args, label, target=None
) -> Optional[VisualizationOutput]:
net = self.models[0] # TODO process multiple models
# initialize baselines
baseline_transforms_len = len(self.features[0].baseline_transforms or [])
baselines = [
[None] * len(self.features) for _ in range(baseline_transforms_len)
]
transformed_inputs = list(inputs)
# transformed_inputs = list([i.clone() for i in inputs])
for feature_i, feature in enumerate(self.features):
if feature.input_transforms is not None:
transformed_inputs[feature_i] = self._transform(
feature.input_transforms, transformed_inputs[feature_i], True
)
if feature.baseline_transforms is not None:
assert baseline_transforms_len == len(
feature.baseline_transforms
), "Must have same number of baselines across all features"
for baseline_i, baseline_transform in enumerate(
feature.baseline_transforms
):
baselines[baseline_i][feature_i] = self._transform(
baseline_transform, transformed_inputs[feature_i], True
)
outputs = _run_forward(
net,
tuple(transformed_inputs),
additional_forward_args=additional_forward_args,
)
if self.score_func is not None:
outputs = self.score_func(outputs)
if outputs.nelement() == 1:
scores = outputs
predicted = scores.round().to(torch.int)
else:
scores, predicted = outputs.topk(min(4, outputs.shape[-1]))
scores = scores.cpu().squeeze(0)
predicted = predicted.cpu().squeeze(0)
if label is not None and len(label) > 0:
actual_label_output = OutputScore(
score=100, index=label[0], label=self.classes[label[0]]
)
else:
actual_label_output = None
predicted_scores = self._get_labels_from_scores(scores, predicted)
# Filter based on UI configuration
if not self._should_keep_prediction(predicted_scores, actual_label_output):
return None
baselines = [tuple(b) for b in baselines]
if target is None:
target = predicted_scores[0].index if len(predicted_scores) > 0 else None
# attributions are given per input*
# inputs given to the model are described via `self.features`
#
# *an input contains multiple features that represent it
# e.g. all the pixels that describe an image is an input
attrs_per_input_feature = self._calculate_attribution(
net, baselines, tuple(transformed_inputs), additional_forward_args, target
)
net_contrib = self._calculate_net_contrib(attrs_per_input_feature)
# the features per input given
features_per_input = [
feature.visualize(attr, data, contrib)
for feature, attr, data, contrib in zip(
self.features, attrs_per_input_feature, inputs, net_contrib
)
]
return VisualizationOutput(
feature_outputs=features_per_input,
actual=actual_label_output,
predicted=predicted_scores,
active_index=target if target is not None else actual_label_output.index,
)
def _get_outputs(self) -> List[VisualizationOutput]:
batch_data = next(self.dataset)
vis_outputs = []
for inputs, additional_forward_args, label in _batched_generator(
inputs=batch_data.inputs,
additional_forward_args=batch_data.additional_args,
target_ind=batch_data.labels,
internal_batch_size=1, # should be 1 until we have batch label support
):
output = self._calculate_vis_output(inputs, additional_forward_args, label)
if output is not None:
cache = SampleCache(inputs, additional_forward_args, label)
vis_outputs.append((output, cache))
return vis_outputs
def visualize(self):
self._outputs = []
while len(self._outputs) < self._config.count:
try:
self._outputs.extend(self._get_outputs())
except StopIteration:
break
return [o[0] for o in self._outputs]
| [
"torch.norm",
"torch.tensor"
] | 1.2 | BrianTillman/captum | edf41d31bd12bd38846b1214ade0ad897063a4d4 |
1.2 | # -*- coding: utf-8 -*-
r"""
malgan.discriminator
~~~~~~~~~~~~~~~~~~~~
Discriminator (i.e., substitute detector) block for MalGAN.
Based on the paper: "Generating Adversarial Malware Examples for Black-Box Attacks Based on GAN"
By Weiwei Hu and Ying Tan.
:version: 0.1.0
:copyright: (c) 2019 by Zayd Hammoudeh.
:license: MIT, see LICENSE for more details.
"""
from typing import List
import torch
from torch import Tensor
import torch.nn as nn
# noinspection PyPep8Naming
class Discriminator(nn.Module):
r""" MalGAN discriminator (substitute detector). Simple feed forward network. """
EPS = 1e-7
def __init__(self, M: int, hidden_size: List[int], g: nn.Module):
r"""Discriminator Constructor
Builds the discriminator block.
:param M: Width of the malware feature vector
:param hidden_size: Width of the hidden layer(s).
:param g: Activation function
"""
super().__init__()
# Build the feed forward layers.
self._layers = nn.Sequential()
for i, (in_w, out_w) in enumerate(zip([M] + hidden_size[:-1], hidden_size)):
layer = nn.Sequential(nn.Linear(in_w, out_w), g)
self._layers.add_module("FF%02d" % i, layer)
layer = nn.Sequential(nn.Linear(hidden_size[-1], 1), nn.Sigmoid())
self._layers.add_module("FF%02d" % len(hidden_size), layer)
def forward(self, X: Tensor) -> Tensor:
r"""
Forward path through the discriminator.
:param X: Input example tensor
:return: :math:`D_{sigma}(x)` -- Value predicted by the discriminator.
"""
d_theta = self._layers(X)
return torch.clamp(d_theta, self.EPS, 1. - self.EPS).view(-1)
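# A minimal usage sketch (illustrative only; the feature width, hidden sizes and
# activation below are assumptions, not values prescribed by the paper):
#   d = Discriminator(M=128, hidden_size=[256, 256], g=nn.LeakyReLU(0.2))
#   probs = d(torch.rand(8, 128))  # shape (8,), values clamped to (EPS, 1 - EPS)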
| [
"torch.nn.Sequential",
"torch.nn.Linear",
"torch.clamp",
"torch.nn.Sigmoid"
] | 1.2.0 | ZaydH/malware_gan | ea3f4e5139e6343c26273db0299a4b9d96d814af |
1.7 | import chess
import argparse
import questionary
import os
import json
import numpy as np
import torch
from tqdm import tqdm
import model
import utils
import pickle
import chess.engine
import nltk
from nltk.translate.bleu_score import SmoothingFunction
MASK_CHAR = u"\u2047"
engine = chess.engine.SimpleEngine.popen_uci("/usr/local/bin/stockfish")
device = torch.cuda.current_device() if torch.cuda.is_available() else 'cpu'
def get_prediction(game_str, gpt_model, vocabs, masks, size=10, sample=False):
stoi = vocabs['stoi']
itos = vocabs['itos']
x = game_str + MASK_CHAR if masks else game_str
x = torch.tensor([stoi[s] for s in x], dtype=torch.long)[None,...].to(device)
pred = utils.sample(gpt_model, x, size, sample=sample)[0]
completion = ''.join([itos[int(i)] for i in pred])
if masks:
return completion.split(MASK_CHAR)[1]
else:
return completion[len(game_str):]
def bot_vs_stockfish(game_str, comm_model, chept_model, comm_vocabs, chept_vocabs, args):
commentaries = []
board = chess.Board()
while True:
comp_move = engine.play(board, chess.engine.Limit(time=0.0005))
game_str += board.san(comp_move.move) + ' '
board.push(comp_move.move)
if board.is_stalemate() or board.is_insufficient_material():
break
if board.is_checkmate():
break
# bot turn
# handle cases where game str is larger than block size
if len(game_str) >= 504:
break
bot_move = get_prediction(game_str, chept_model, chept_vocabs, args.masks).split(' ')[0]
try:
board.push_san(bot_move)
except ValueError:
# try re-sampling
success = False
for i in range(args.n_tries):
bot_move = get_prediction(game_str, chept_model, chept_vocabs, args.masks, sample=True).split(' ')[0]
try:
board.push_san(bot_move)
success = True
break
except ValueError:
pass
if not success:
break
game_str = game_str + bot_move + ' '
# get commentary:
commentary = get_prediction(game_str, comm_model, comm_vocabs, args.masks, size=args.comm_size)
print(game_str, commentary)
commentaries.append(game_str + commentary)
if board.is_stalemate() or board.is_insufficient_material():
break
if board.is_checkmate():
break
return commentaries
def save_results(results, args, scenario):
save_dir = 'commentary_results'
save_dir = os.path.join(save_dir, scenario)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
save_file = args.save_name + '.json'
save_path = os.path.join(save_dir, save_file)
with open(save_path, 'w', encoding='utf-8') as f:
json.dump(results, f, ensure_ascii=False, indent=4)
def eval_scores(references, hypotheses):
BLEUscores = []
lengths = []
sentences = []
for i in range(len(hypotheses)):
reference, hypothesis = references[i], hypotheses[i]
lengths.append({'Reference Length': len(reference),
'Prediction Length': len(hypothesis)})
sentences.append({'Reference': reference,
'Predicted': hypothesis})
reference, hypothesis = reference.split(' '), hypothesis.split(' ')
# TODO: weights based on lengths of sentences
BLEUscore = nltk.translate.bleu_score.sentence_bleu([reference], hypothesis,
smoothing_function=SmoothingFunction(epsilon=1e-12).method1)
BLEUscores.append(BLEUscore)
avg_bleu = np.mean(BLEUscores)
results_dict = {'BLEU Scores': BLEUscores,
'Average BLEU': float(avg_bleu),
'Sentences': sentences,
'Sentence Lengths': lengths}
return results_dict
def main(comm_model, chept_model, test_file, comm_vocabs, chept_vocabs, args):
if chept_model and chept_vocabs:
print(f'\nPlaying {args.n_games} games of ChePT vs. Stockfish')
results = {}
for i in tqdm(range(args.n_games)):
result = bot_vs_stockfish('',
comm_model,
chept_model,
comm_vocabs,
chept_vocabs,
args)
results[i] = result
save_results(results, args, '_with_both')
else:
test_scenarios = open(test_file).readlines()
print(f'\nEvaluating {len(test_scenarios)} test scenarios.')
references = []
hypotheses = []
pgns = []
test_scenarios = test_scenarios[400:450]
for test in tqdm(test_scenarios):
split = test.split(MASK_CHAR)
if not args.no_ref:
references.append(split[1])
commentary = get_prediction(split[0], comm_model, comm_vocabs, args.masks, size=args.comm_size, sample=args.sampling)
pgns.append(split[0])
print(commentary)
hypotheses.append(commentary)
# eval results (such as bleu score)
if not args.no_ref:
results = eval_scores(references, hypotheses)
results.update({'PGNs': pgns})
save_results(results, args, '_eval_text')
def get_recent_ckpt(ckpt_dir):
if not os.path.isdir(ckpt_dir):
raise ValueError(f"Default checkpoint dir at {ckpt_dir} missing!")
files = os.listdir(ckpt_dir)
if 'best_loss.pt' in files:
answer = questionary.confirm("File best_loss.pt found. Use this file?").ask()
if answer:
return os.path.join(ckpt_dir, 'best_loss.pt')
epoch_list = [x for x in files if 'epoch' in x]
if len(epoch_list) > 0:
answer = questionary.confirm("Epoch files found. Use best epoch file?").ask()
if answer:
epoch_list.sort(key=lambda x: int(x.split('_')[1].split('.')[0]), reverse=True)
return os.path.join(ckpt_dir, epoch_list[0])
iter_list = [x for x in files if 'iter' in x]
iter_list.sort(key=lambda x: int(x.split('_')[1].split('.')[0]), reverse=True)
return os.path.join(ckpt_dir, iter_list[0])
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--comm_ckpt', type=str, default='ckpts/commentary_final',
help='Path to commentary model to use')
parser.add_argument('--chept_ckpt', type=str, default='ckpts/finetune_late/iter_152000.pt',
help='Path to ChePT model to use')
parser.add_argument('--test_file', type=str, default=None,
help='Test inputs to evaluate')
parser.add_argument('--n_games', type=int, default=5,
help='Number of games to evaluate')
parser.add_argument('--n_tries', type=int, default=5,
help='Number of retries to give ChePT')
parser.add_argument('--comm_size', type=int, default=200,
help='Number of samples to take for commentary generation')
parser.add_argument('--masks', action='store_false',
help='Toggle masks OFF')
parser.add_argument('--sampling', action='store_true',
help='Toggle sampling ON')
parser.add_argument('--no_ref', action='store_false',
help='Toggle references off')
args = parser.parse_args()
comm_ckpt, chept_ckpt, test_file = args.comm_ckpt, args.chept_ckpt, args.test_file
if os.path.isdir(comm_ckpt):
comm_ckpt = get_recent_ckpt(comm_ckpt)
print(f'Using {comm_ckpt} for commentary model')
if chept_ckpt and test_file:
chept_ckpt = None
print('Evaluating model to find BLEU score.')
assert os.path.isfile(test_file)
else:
assert os.path.isfile(chept_ckpt)
suffix = '_with_chept' if chept_ckpt else '_score_eval'
args.save_name = comm_ckpt.split('/')[1] + suffix
# get ckpt
comm_ckpt = torch.load(comm_ckpt, map_location=torch.device(device))
comm_model_config = comm_ckpt['model_config']
comm_itos = comm_ckpt['itos']
comm_stoi = comm_ckpt['stoi']
comm_vocabs = {'itos': comm_itos,
'stoi': comm_stoi
}
# build model config
comm_mconf = model.GPTConfig(
vocab_size=len(comm_itos),
args_dict=comm_model_config.__dict__
)
# load model weights
comm_model = model.GPT(comm_mconf)
comm_model = comm_model.to(device)
comm_model.load_state_dict(comm_ckpt['state_dict'])
if chept_ckpt:
chept_ckpt = torch.load(chept_ckpt, map_location=torch.device(device))
chept_model_config = chept_ckpt['model_config']
chept_itos = chept_ckpt['itos']
chept_stoi = chept_ckpt['stoi']
chept_vocabs = {'itos': chept_itos,
'stoi': chept_stoi
}
# build model config
chept_mconf = model.GPTConfig(
vocab_size=len(chept_itos),
args_dict=chept_model_config.__dict__
)
# load model weights
chept_model = model.GPT(chept_mconf)
chept_model = chept_model.to(device)
chept_model.load_state_dict(chept_ckpt['state_dict'])
else:
chept_model = None
chept_vocabs = None
main(comm_model, chept_model, test_file, comm_vocabs, chept_vocabs, args)
engine.quit()
| [
"torch.device",
"torch.cuda.current_device",
"torch.cuda.is_available",
"torch.tensor"
] | 1.7.1 | HarryMellsop/chept-neural-chess | 656cb385e69d21c28117ef1fd0ecc671e01f1c1d |
1.5 | import datetime
import json
import os
import os.path as osp
from contextlib import contextmanager
try:
from torch.utils.tensorboard.writer import SummaryWriter
except ImportError:
print("Unable to import tensorboard SummaryWriter, proceeding without.")
from rlpyt.utils.logging import logger
LOG_DIR = osp.abspath(osp.join(osp.dirname(__file__), '../../../data'))
# def get_log_dir(experiment_name, root_log_dir=None, date=True):
# root_log_dir = LOG_DIR if root_log_dir is None else root_log_dir
# log_dir = osp.join(root_log_dir, experiment_name)
# return log_dir
@contextmanager
def logger_context(
log_dir, log_params=None, snapshot_mode="none", override_prefix=False,
use_summary_writer=False,
):
"""Use as context manager around calls to the runner's ``train()`` method.
Sets up the logger directory and filenames. Unless override_prefix is
True, this function automatically prepends ``log_dir`` with the rlpyt
logging directory and the date: `path-to-rlpyt/data/yyyymmdd/hhmmss`
(`data/` is in the gitignore), and appends with `/run_{run_ID}` to
separate multiple runs of the same settings. Saves hyperparameters
provided in ``log_params`` to `params.json`, along with experiment `name`
and `run_ID`.
Input ``snapshot_mode`` refers to how often the logger actually saves the
snapshot (which may include agent parameters). The runner calls on the
logger to save the snapshot at every iteration, but ``snapshot_mode``
controls how often the logger actually writes it. Possible modes include
(but check inside the logger itself):
* "none": don't save at all
* "last": always save and overwrite the previous
* "all": always save and keep each iteration
* "gap": save periodically and keep each (will also need to set the gap, not done here)
The cleanup operations after the ``yield`` close files but might not be
strictly necessary if not launching another training session in the same
python process.
"""
logger.set_snapshot_mode(snapshot_mode)
logger.set_log_tabular_only(False)
name, run = log_dir.split('/')[-2:]
run_ID = run[-1]
tabular_log_file = osp.join(log_dir, "progress.csv")
text_log_file = osp.join(log_dir, "debug.log")
params_log_file = osp.join(log_dir, "params.json")
logger.set_snapshot_dir(log_dir)
if use_summary_writer:
logger.set_tf_summary_writer(SummaryWriter(log_dir))
logger.add_text_output(text_log_file)
logger.add_tabular_output(tabular_log_file)
logger.push_prefix(f"{name}_{run_ID} ")
if log_params is None:
log_params = dict()
log_params["name"] = name
log_params["run_ID"] = run_ID
with open(params_log_file, "w") as f:
json.dump(log_params, f, default=lambda o: type(o).__name__)
yield
logger.remove_tabular_output(tabular_log_file)
logger.remove_text_output(text_log_file)
logger.pop_prefix()
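# A minimal usage sketch (illustrative only; the directory layout, params and the
# `runner` object are assumptions, not provided by this module):
#   log_dir = osp.join(LOG_DIR, "20200101/120000/my_experiment/run_0")
#   with logger_context(log_dir, log_params={"lr": 1e-3}, snapshot_mode="last"):
#       runner.train()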
def add_exp_param(param_name, param_val, exp_dir=None, overwrite=False):
"""Puts a param in all experiments in immediate subdirectories.
So you can write a new distinguising param after the fact, perhaps
reflecting a combination of settings."""
if exp_dir is None:
exp_dir = os.getcwd()
for sub_dir in os.walk(exp_dir):
if "params.json" in sub_dir[2]:
update_param = True
params_f = osp.join(sub_dir[0], "params.json")
with open(params_f, "r") as f:
params = json.load(f)
if param_name in params:
if overwrite:
print("Overwriting param: {}, old val: {}, new val: {}".format(
param_name, params[param_name], param_val))
else:
print("Param {} already found & overwrite set to False; "
"leaving old val: {}.".format(param_name, params[param_name]))
update_param = False
if update_param:
os.remove(params_f)
params[param_name] = param_val
with open(params_f, "w") as f:
json.dump(params, f, default=lambda o: type(o).__name__)
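# Illustrative call (the experiment directory and values are assumptions):
#   add_exp_param("batch_size", 256, exp_dir="/path/to/data/20200101", overwrite=True)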
| [
"torch.utils.tensorboard.writer.SummaryWriter"
] | 1.5.1 | williamd4112/curiosity_baselines | 45939f3f24c53cfff5153ef012486a6a058660be |
1.6 | import numpy as np
import torch
from .downloader import load_trained_model
from .spacy_extensions import ConstituentData
from ..parse_base import BaseInputExample
class PartialConstituentData:
def __init__(self):
self.starts = [np.array([], dtype=int)]
self.ends = [np.array([], dtype=int)]
self.labels = [np.array([], dtype=int)]
def finalize(self, doc, label_vocab):
self.starts = np.hstack(self.starts)
self.ends = np.hstack(self.ends)
self.labels = np.hstack(self.labels)
# TODO(nikita): Python for loops aren't very fast
loc_to_constituent = np.full(len(doc), -1, dtype=int)
prev = None
for position in range(self.starts.shape[0]):
if self.starts[position] != prev:
prev = self.starts[position]
loc_to_constituent[self.starts[position]] = position
return ConstituentData(
self.starts, self.ends, self.labels, loc_to_constituent, label_vocab
)
class SentenceWrapper(BaseInputExample):
TEXT_NORMALIZATION_MAPPING = {
"`": "'",
"«": '"',
"»": '"',
"‘": "'",
"’": "'",
"“": '"',
"”": '"',
"„": '"',
"‹": "'",
"›": "'",
"—": "--", # em dash
}
def __init__(self, spacy_sent):
self.sent = spacy_sent
@property
def words(self):
return [
self.TEXT_NORMALIZATION_MAPPING.get(token.text, token.text)
for token in self.sent
]
@property
def space_after(self):
return [bool(token.whitespace_) for token in self.sent]
@property
def tree(self):
return None
def leaves(self):
return self.words
def pos(self):
return [(word, "UNK") for word in self.words]
class BeneparComponent:
"""
Berkeley Neural Parser (benepar) component for spaCy.
Sample usage:
>>> nlp = spacy.load('en_core_web_md')
>>> if spacy.__version__.startswith('2'):
nlp.add_pipe(BeneparComponent("benepar_en3"))
else:
nlp.add_pipe("benepar", config={"model": "benepar_en3"})
>>> doc = nlp("The quick brown fox jumps over the lazy dog.")
>>> sent = list(doc.sents)[0]
>>> print(sent._.parse_string)
This component is only responsible for constituency parsing and (for some
trained models) part-of-speech tagging. It should be preceded in the
pipeline by other components that can, at minimum, perform tokenization and
sentence segmentation.
"""
name = "benepar"
def __init__(
self,
name,
subbatch_max_tokens=500,
disable_tagger=False,
batch_size="ignored",
):
"""Load a trained parser model.
Args:
name (str): Model name, or path to pytorch saved model
subbatch_max_tokens (int): Maximum number of tokens to process in
each batch
disable_tagger (bool, default False): Unless disabled, the parser
will set predicted part-of-speech tags for the document,
overwriting any existing tags provided by spaCy models or
previous pipeline steps. This option has no effect for parser
models that do not have a part-of-speech tagger built in.
batch_size: deprecated and ignored; use subbatch_max_tokens instead
"""
self._parser = load_trained_model(name)
if torch.cuda.is_available():
self._parser.cuda()
self.subbatch_max_tokens = subbatch_max_tokens
self.disable_tagger = disable_tagger
self._label_vocab = self._parser.config["label_vocab"]
label_vocab_size = max(self._label_vocab.values()) + 1
self._label_from_index = [()] * label_vocab_size
for label, i in self._label_vocab.items():
if label:
self._label_from_index[i] = tuple(label.split("::"))
else:
self._label_from_index[i] = ()
self._label_from_index = tuple(self._label_from_index)
if not self.disable_tagger:
tag_vocab = self._parser.config["tag_vocab"]
tag_vocab_size = max(tag_vocab.values()) + 1
self._tag_from_index = [()] * tag_vocab_size
for tag, i in tag_vocab.items():
self._tag_from_index[i] = tag
self._tag_from_index = tuple(self._tag_from_index)
else:
self._tag_from_index = None
def __call__(self, doc):
"""Update the input document with predicted constituency parses."""
# TODO(https://github.com/nikitakit/self-attentive-parser/issues/16): handle
# tokens that consist entirely of whitespace.
constituent_data = PartialConstituentData()
wrapped_sents = [SentenceWrapper(sent) for sent in doc.sents]
for sent, parse in zip(
doc.sents,
self._parser.parse(
wrapped_sents,
return_compressed=True,
subbatch_max_tokens=self.subbatch_max_tokens,
),
):
constituent_data.starts.append(parse.starts + sent.start)
constituent_data.ends.append(parse.ends + sent.start)
constituent_data.labels.append(parse.labels)
if parse.tags is not None and not self.disable_tagger:
for i, tag_id in enumerate(parse.tags):
sent[i].tag_ = self._tag_from_index[tag_id]
doc._._constituent_data = constituent_data.finalize(doc, self._label_from_index)
return doc
def create_benepar_component(
nlp,
name,
model: str,
subbatch_max_tokens: int,
disable_tagger: bool,
):
return BeneparComponent(
model,
subbatch_max_tokens=subbatch_max_tokens,
disable_tagger=disable_tagger,
)
def register_benepar_component_factory():
# Starting with spaCy 3.0, nlp.add_pipe no longer directly accepts
# BeneparComponent instances. We must instead register a component factory.
import spacy
if spacy.__version__.startswith("2"):
return
from spacy.language import Language
Language.factory(
"benepar",
default_config={
"subbatch_max_tokens": 500,
"disable_tagger": False,
},
func=create_benepar_component,
)
try:
register_benepar_component_factory()
except ImportError:
pass
| [
"torch.cuda.is_available"
] | 1.6.0 | speedcell4/self-attentive-parser | 644a27d07316d1441a62425c85f78128b8dee4fe |
1.1 | import torch.nn as nn
import torch.nn.functional as F
class MnistModel(nn.Module):
def __init__(self, num_classes=10):
super().__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, num_classes)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
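# Shape-check sketch (assumes `import torch` and MNIST-sized 1x28x28 inputs):
#   model = MnistModel()
#   out = model(torch.randn(4, 1, 28, 28))  # -> (4, 10) log-probabilities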
| [
"torch.nn.Linear",
"torch.nn.functional.dropout",
"torch.nn.functional.log_softmax",
"torch.nn.Conv2d",
"torch.nn.Dropout2d"
] | 1.1 | Yoontae6719/pytorch_hydra | c608f144fb8002bdcd329e09daf0416cba10a850 |
1.6 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from collections import OrderedDict
from logging import INFO
from typing import Union
import pytest
import torch
import torch.nn.utils.prune as pytorch_prune
from torch import nn
from torch.nn import Sequential
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, ModelPruning
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.helpers.boring_model import BoringModel
from tests.helpers.runif import RunIf
class TestModel(BoringModel):
test_step = None
def __init__(self):
super().__init__()
self.layer = Sequential(
OrderedDict(
[("mlp_1", nn.Linear(32, 32)), ("mlp_2", nn.Linear(32, 32, bias=False)), ("mlp_3", nn.Linear(32, 2))]
)
)
def training_step(self, batch, batch_idx):
self.log("test", -batch_idx)
return super().training_step(batch, batch_idx)
class TestPruningMethod(pytorch_prune.BasePruningMethod):
PRUNING_TYPE = "unstructured"
def compute_mask(self, _, default_mask):
mask = default_mask.clone()
# Prune every other entry in a tensor
mask.view(-1)[::2] = 0
return mask
@classmethod
def apply(cls, module, name, amount):
return super().apply(module, name, amount=amount)
def train_with_pruning_callback(
tmpdir,
parameters_to_prune=False,
use_global_unstructured=False,
pruning_fn="l1_unstructured",
use_lottery_ticket_hypothesis=False,
strategy=None,
gpus=None,
num_processes=1,
):
model = TestModel()
# Weights are random. None is 0
assert torch.all(model.layer.mlp_2.weight != 0)
pruning_kwargs = {
"pruning_fn": pruning_fn,
"amount": 0.3,
"use_global_unstructured": use_global_unstructured,
"use_lottery_ticket_hypothesis": use_lottery_ticket_hypothesis,
"verbose": 1,
}
if parameters_to_prune:
pruning_kwargs["parameters_to_prune"] = [(model.layer.mlp_1, "weight"), (model.layer.mlp_2, "weight")]
else:
if isinstance(pruning_fn, str) and pruning_fn.endswith("_structured"):
pruning_kwargs["parameter_names"] = ["weight"]
else:
pruning_kwargs["parameter_names"] = ["weight", "bias"]
if isinstance(pruning_fn, str) and pruning_fn.endswith("_structured"):
pruning_kwargs["pruning_dim"] = 0
if pruning_fn == "ln_structured":
pruning_kwargs["pruning_norm"] = 1
# Misconfiguration checks
if isinstance(pruning_fn, str) and pruning_fn.endswith("_structured") and use_global_unstructured:
with pytest.raises(MisconfigurationException, match="is supported with `use_global_unstructured=True`"):
ModelPruning(**pruning_kwargs)
return
if ModelPruning._is_pruning_method(pruning_fn) and not use_global_unstructured:
with pytest.raises(MisconfigurationException, match="currently only supported with"):
ModelPruning(**pruning_kwargs)
return
pruning = ModelPruning(**pruning_kwargs)
trainer = Trainer(
default_root_dir=tmpdir,
enable_progress_bar=False,
enable_model_summary=False,
enable_checkpointing=False,
logger=False,
limit_train_batches=10,
limit_val_batches=2,
max_epochs=10,
strategy=strategy,
gpus=gpus,
num_processes=num_processes,
callbacks=pruning,
)
trainer.fit(model)
trainer.test(model)
if not strategy:
# Check some have been pruned
assert torch.any(model.layer.mlp_2.weight == 0)
def test_pruning_misconfiguration():
with pytest.raises(MisconfigurationException, match=r"chocolate isn't in \('weight', 'bias'\)"):
ModelPruning(pruning_fn="l1_unstructured", parameter_names=["chocolate"])
with pytest.raises(MisconfigurationException, match=r"expected to be a str in \["):
ModelPruning(pruning_fn={})
with pytest.raises(MisconfigurationException, match="should be provided"):
ModelPruning(pruning_fn="random_structured")
with pytest.raises(MisconfigurationException, match=r"must be any of \(0, 1, 2\)"):
ModelPruning(pruning_fn="l1_unstructured", verbose=3)
with pytest.raises(MisconfigurationException, match="requesting `ln_structured` pruning, the `pruning_norm`"):
ModelPruning(pruning_fn="ln_structured", pruning_dim=0)
@pytest.mark.parametrize("parameters_to_prune", [False, True])
@pytest.mark.parametrize("use_global_unstructured", [False, True])
@pytest.mark.parametrize(
"pruning_fn", ["l1_unstructured", "random_unstructured", "ln_structured", "random_structured", TestPruningMethod]
)
@pytest.mark.parametrize("use_lottery_ticket_hypothesis", [False, True])
def test_pruning_callback(
tmpdir,
use_global_unstructured: bool,
parameters_to_prune: bool,
pruning_fn: Union[str, pytorch_prune.BasePruningMethod],
use_lottery_ticket_hypothesis: bool,
):
train_with_pruning_callback(
tmpdir,
parameters_to_prune=parameters_to_prune,
use_global_unstructured=use_global_unstructured,
pruning_fn=pruning_fn,
use_lottery_ticket_hypothesis=use_lottery_ticket_hypothesis,
)
@RunIf(special=True, min_gpus=2)
def test_pruning_callback_ddp_0(tmpdir):
train_with_pruning_callback(
tmpdir, parameters_to_prune=False, use_global_unstructured=False, strategy="ddp", gpus=2
)
@RunIf(special=True, min_gpus=2)
def test_pruning_callback_ddp_1(tmpdir):
train_with_pruning_callback(tmpdir, parameters_to_prune=False, use_global_unstructured=True, strategy="ddp", gpus=2)
@RunIf(special=True, min_gpus=2)
def test_pruning_callback_ddp_2(tmpdir):
train_with_pruning_callback(tmpdir, parameters_to_prune=True, use_global_unstructured=False, strategy="ddp", gpus=2)
@RunIf(special=True, min_gpus=2)
def test_pruning_callback_ddp_3(tmpdir):
train_with_pruning_callback(tmpdir, parameters_to_prune=True, use_global_unstructured=True, strategy="ddp", gpus=2)
@RunIf(min_gpus=2, skip_windows=True)
def test_pruning_callback_ddp_spawn(tmpdir):
train_with_pruning_callback(tmpdir, use_global_unstructured=True, strategy="ddp_spawn", gpus=2)
@RunIf(skip_windows=True)
def test_pruning_callback_ddp_cpu(tmpdir):
train_with_pruning_callback(tmpdir, parameters_to_prune=True, strategy="ddp_spawn", num_processes=2)
@pytest.mark.parametrize("resample_parameters", (False, True))
def test_pruning_lth_callable(tmpdir, resample_parameters: bool):
model = TestModel()
class ModelPruningTestCallback(ModelPruning):
lth_calls = 0
def apply_lottery_ticket_hypothesis(self):
super().apply_lottery_ticket_hypothesis()
self.lth_calls += 1
for d in self._original_layers.values():
copy, names = d["data"], d["names"]
for i, name in names:
curr, curr_name = self._parameters_to_prune[i]
assert name == curr_name
actual, expected = getattr(curr, name).data, getattr(copy, name).data
allclose = torch.allclose(actual, expected)
assert not allclose if self._resample_parameters else allclose
pruning = ModelPruningTestCallback(
"l1_unstructured", use_lottery_ticket_hypothesis=lambda e: bool(e % 2), resample_parameters=resample_parameters
)
trainer = Trainer(
default_root_dir=tmpdir,
enable_progress_bar=False,
enable_model_summary=False,
enable_checkpointing=False,
logger=False,
limit_train_batches=10,
limit_val_batches=2,
max_epochs=5,
callbacks=pruning,
)
trainer.fit(model)
assert pruning.lth_calls == trainer.max_epochs // 2
@pytest.mark.parametrize("make_pruning_permanent", (False, True))
def test_multiple_pruning_callbacks(tmpdir, caplog, make_pruning_permanent: bool):
model = TestModel()
pruning_kwargs = {
"parameters_to_prune": [(model.layer.mlp_1, "weight"), (model.layer.mlp_3, "weight")],
"verbose": 2,
"make_pruning_permanent": make_pruning_permanent,
}
p1 = ModelPruning("l1_unstructured", amount=0.5, apply_pruning=lambda e: not e % 2, **pruning_kwargs)
p2 = ModelPruning("random_unstructured", amount=0.25, apply_pruning=lambda e: e % 2, **pruning_kwargs)
trainer = Trainer(
default_root_dir=tmpdir,
enable_progress_bar=False,
enable_model_summary=False,
enable_checkpointing=False,
logger=False,
limit_train_batches=10,
limit_val_batches=2,
max_epochs=3,
callbacks=[p1, p2],
)
with caplog.at_level(INFO):
trainer.fit(model)
actual = [m.strip() for m in caplog.messages]
actual = [m for m in actual if m.startswith("Applied")]
percentage = r"\(\d+(?:\.\d+)?%\)"
expected = [
rf"Applied `L1Unstructured`. Pruned: \d+\/1122 {percentage} -> \d+\/1122 {percentage}",
rf"Applied `L1Unstructured` to `Linear\(in_features=32, out_features=32, bias=True\).weight` with amount=0.5. Pruned: 0 \(0.00%\) -> \d+ {percentage}", # noqa: E501
rf"Applied `L1Unstructured` to `Linear\(in_features=32, out_features=2, bias=True\).weight` with amount=0.5. Pruned: 0 \(0.00%\) -> \d+ {percentage}", # noqa: E501
rf"Applied `RandomUnstructured`. Pruned: \d+\/1122 {percentage} -> \d+\/1122 {percentage}",
rf"Applied `RandomUnstructured` to `Linear\(in_features=32, out_features=32, bias=True\).weight` with amount=0.25. Pruned: \d+ {percentage} -> \d+ {percentage}", # noqa: E501
rf"Applied `RandomUnstructured` to `Linear\(in_features=32, out_features=2, bias=True\).weight` with amount=0.25. Pruned: \d+ {percentage} -> \d+ {percentage}", # noqa: E501
rf"Applied `L1Unstructured`. Pruned: \d+\/1122 {percentage} -> \d+\/1122 {percentage}",
rf"Applied `L1Unstructured` to `Linear\(in_features=32, out_features=32, bias=True\).weight` with amount=0.5. Pruned: \d+ {percentage} -> \d+ {percentage}", # noqa: E501
rf"Applied `L1Unstructured` to `Linear\(in_features=32, out_features=2, bias=True\).weight` with amount=0.5. Pruned: \d+ {percentage} -> \d+ {percentage}", # noqa: E501
]
expected = [re.compile(s) for s in expected]
assert all(regex.match(s) for s, regex in zip(actual, expected))
filepath = str(tmpdir / "foo.ckpt")
trainer.save_checkpoint(filepath)
model.load_from_checkpoint(filepath, strict=False)
has_pruning = hasattr(model.layer.mlp_1, "weight_orig")
assert not has_pruning if make_pruning_permanent else has_pruning
@pytest.mark.parametrize("prune_on_train_epoch_end", (False, True))
@pytest.mark.parametrize("save_on_train_epoch_end", (False, True))
def test_permanent_when_model_is_saved_multiple_times(
tmpdir, caplog, prune_on_train_epoch_end, save_on_train_epoch_end
):
"""When a model is saved multiple times and make_permanent=True, we need to make sure a copy is pruned and not
the trained model if we want to continue with the same pruning buffers."""
if prune_on_train_epoch_end and save_on_train_epoch_end:
pytest.xfail(
"Pruning sets the `grad_fn` of the parameters so we can't save"
" right after as pruning has not been made permanent"
)
class TestPruning(ModelPruning):
def on_save_checkpoint(self, trainer, pl_module, checkpoint):
had_buffers = hasattr(pl_module.layer.mlp_3, "weight_orig")
super().on_save_checkpoint(trainer, pl_module, checkpoint)
assert "layer.mlp_3.weight_orig" not in checkpoint["state_dict"]
if had_buffers:
assert hasattr(pl_module.layer.mlp_3, "weight_orig")
model = TestModel()
pruning_callback = TestPruning(
"random_unstructured",
parameters_to_prune=[(model.layer.mlp_3, "weight")],
verbose=1,
make_pruning_permanent=True,
prune_on_train_epoch_end=prune_on_train_epoch_end,
)
ckpt_callback = ModelCheckpoint(
monitor="test", save_top_k=2, save_last=True, save_on_train_epoch_end=save_on_train_epoch_end
)
trainer = Trainer(callbacks=[pruning_callback, ckpt_callback], max_epochs=3, enable_progress_bar=False)
with caplog.at_level(INFO):
trainer.fit(model)
actual = [m.strip() for m in caplog.messages]
actual = [m for m in actual if m.startswith("Applied")]
percentage = r"\(\d+(?:\.\d+)?%\)"
expected = [
rf"Applied `RandomUnstructured`. Pruned: \d+\/66 {percentage} -> \d+\/66 {percentage}",
rf"Applied `RandomUnstructured`. Pruned: \d+\/66 {percentage} -> \d+\/66 {percentage}",
rf"Applied `RandomUnstructured`. Pruned: \d+\/66 {percentage} -> \d+\/66 {percentage}",
]
expected = [re.compile(s) for s in expected]
assert all(regex.match(s) for s, regex in zip(actual, expected))
# removed on_train_end
assert not hasattr(model.layer.mlp_3, "weight_orig")
model.load_from_checkpoint(trainer.checkpoint_callback.kth_best_model_path)
assert not hasattr(model.layer.mlp_3, "weight_orig")
model.load_from_checkpoint(trainer.checkpoint_callback.last_model_path)
assert not hasattr(model.layer.mlp_3, "weight_orig")
| [
"torch.any",
"torch.allclose",
"torch.all",
"torch.nn.Linear"
] | 1.6 | qmpzzpmq/pytorch-lightning | 854bdc042d12fe4b713de881c58b025de30d0c39 |
0.6 | import os
import time
from cv2 import cv2
import numpy as np
from collections import defaultdict
from PIL import Image
from torch import torch
import torch.nn.functional as F
from torchvision import transforms as T
from face_utils import norm_crop, FaceDetector
from model_def import WSDAN, xception
class DFDCLoader:
def __init__(self, video_dir, face_detector, transform=None,
batch_size=25, frame_skip=9, face_limit=25):
self.video_dir = video_dir
self.file_list = sorted(f for f in os.listdir(video_dir) if f.endswith(".mp4"))
self.transform = transform
self.face_detector = face_detector
self.batch_size = batch_size
self.frame_skip = frame_skip
self.face_limit = face_limit
self.record = defaultdict(list)
self.score = defaultdict(lambda: 0.5) #最终的预测概率
self.feedback_queue = []
def iter_one_face(self):
for fname in self.file_list:
path = os.path.join(self.video_dir, fname)
reader = cv2.VideoCapture(path)
face_count = 0
while True:
for _ in range(self.frame_skip):
reader.grab()
success, img = reader.read()
# 检测这一帧是否存在
if not success:
break
boxes, landms = self.face_detector.detect(img)
if boxes.shape[0] == 0:
continue
areas = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
order = areas.argmax()
boxes = boxes[order]
landms = landms[order]
# Crop faces
landmarks = landms.numpy().reshape(5, 2).astype(np.int)
img = norm_crop(img, landmarks, image_size=320)
aligned = Image.fromarray(img[:, :, ::-1])
if self.transform:
aligned = self.transform(aligned)
yield fname, aligned
# Early stop
face_count += 1
if face_count == self.face_limit:
break
reader.release()
def __iter__(self):
self.record.clear()
self.feedback_queue.clear()
batch_buf = []
t0 = time.time()
batch_count = 0
for fname, face in self.iter_one_face():
self.feedback_queue.append(fname)
batch_buf.append(face)
if len(batch_buf) == self.batch_size:
yield torch.stack(batch_buf)
batch_count += 1
batch_buf.clear()
if batch_count % 10 == 0:
elapsed = 1000 * (time.time() - t0)
print("T: %.2f ms / batch" % (elapsed / batch_count))
if len(batch_buf) > 0:
yield torch.stack(batch_buf)
def feedback(self, pred):
accessed = set()
for score in pred:
fname = self.feedback_queue.pop(0)
accessed.add(fname)
self.record[fname].append(score)
for fname in sorted(accessed):
self.score[fname] = np.mean(self.record[fname])
print("[%s] %.6f" % (fname, self.score[fname]))
def main():
torch.set_grad_enabled(False)
torch.backends.cudnn.benchmark = True
test_dir = "./input/deepfake-detection-challenge/test_videos"
csv_path = "./input/deepfake-detection-challenge/sample_submission.csv"
face_detector = FaceDetector()
face_detector.load_checkpoint("./input/dfdc-pretrained-2/RetinaFace-Resnet50-fixed.pth")
loader = DFDCLoader(test_dir, face_detector, T.ToTensor())
model1 = xception(num_classes=2, pretrained=False)
ckpt = torch.load("./input/dfdc-pretrained-2/xception-hg-2.pth", map_location=torch.device('cpu'))
model1.load_state_dict(ckpt["state_dict"])
model1 = model1.cpu()
model1.eval()
model2 = WSDAN(num_classes=2, M=8, net="xception", pretrained=False).cpu()
ckpt = torch.load("./input/dfdc-pretrained-2/ckpt_x.pth", map_location=torch.device('cpu'))
model2.load_state_dict(ckpt["state_dict"])
model2.eval()
model3 = WSDAN(num_classes=2, M=8, net="efficientnet", pretrained=False).cpu()
ckpt = torch.load("./input/dfdc-pretrained-2/ckpt_e.pth", map_location=torch.device('cpu'))
model3.load_state_dict(ckpt["state_dict"])
model3.eval()
zhq_nm_avg = torch.Tensor([.4479, .3744, .3473]).view(1, 3, 1, 1).cpu()
zhq_nm_std = torch.Tensor([.2537, .2502, .2424]).view(1, 3, 1, 1).cpu()
for batch in loader:
batch = batch.cpu()
i1 = F.interpolate(batch, size=299, mode="bilinear")
i1.sub_(0.5).mul_(2.0)
o1 = model1(i1).softmax(-1)[:, 1].cpu().numpy()
i2 = (batch - zhq_nm_avg) / zhq_nm_std
o2, _, _ = model2(i2)
o2 = o2.softmax(-1)[:, 1].cpu().numpy()
i3 = F.interpolate(i2, size=300, mode="bilinear")
o3, _, _ = model3(i3)
o3 = o3.softmax(-1)[:, 1].cpu().numpy()
out = 0.2 * o1 + 0.7 * o2 + 0.1 * o3
loader.feedback(out)
with open(csv_path) as fin, open("submission.csv", "w") as fout:
fout.write(next(fin))
for line in fin:
fname = line.split(",", 1)[0]
pred = loader.score[fname]
print("%s,%.6f" % (fname, pred), file=fout)
if __name__ == "__main__":
main()
| [
"torch.nn.functional.interpolate",
"torch.torch.Tensor",
"torch.torch.device",
"torch.torch.set_grad_enabled",
"torch.torch.stack"
] | 0.6.3 | UnknwoonUser/kaggle-dfdc | 2088c2c54866b6a959e3477c1c7f277f82b9ebe3 |
1.6 | # Copyright (c) Facebook, Inc. and its affiliates.
import os
import pickle
import re
from collections import OrderedDict
from copy import deepcopy
from dataclasses import dataclass
from enum import Enum
from typing import Any
import torch
import torchvision
from mmf.common.registry import registry
from mmf.modules.embeddings import ProjectionEmbedding, TextEmbedding
from mmf.modules.hf_layers import BertModelJit
from mmf.modules.layers import Identity
from mmf.utils.build import build_image_encoder, build_text_encoder
from mmf.utils.download import download_pretrained_model
from mmf.utils.file_io import PathManager
from mmf.utils.general import get_absolute_path
from omegaconf import MISSING, OmegaConf
from torch import nn
from transformers.configuration_auto import AutoConfig
from transformers.modeling_auto import AutoModel
try:
from detectron2.modeling import ShapeSpec, build_resnet_backbone
except ImportError:
pass
class Encoder(nn.Module):
@dataclass
class Config:
name: str = MISSING
@classmethod
def from_params(cls, **kwargs):
config = OmegaConf.structured(cls.Config(**kwargs))
return cls(config)
class EncoderFactory(nn.Module):
@dataclass
class Config:
type: str = MISSING
params: Encoder.Config = MISSING
class ImageFeatureEncoderTypes(Enum):
default = "default"
identity = "identity"
projection = "projection"
frcnn_fc7 = "finetune_faster_rcnn_fpn_fc7"
class ImageFeatureEncoder(Encoder):
@dataclass
class Config(Encoder.Config):
in_dim: int = MISSING
class ImageFeatureEncoderFactory(EncoderFactory):
@dataclass
class Config(EncoderFactory.Config):
type: ImageFeatureEncoderTypes = MISSING
params: ImageFeatureEncoder.Config = MISSING
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
encoder_type = config.type
if isinstance(encoder_type, ImageFeatureEncoderTypes):
encoder_type = encoder_type.value
assert (
"in_dim" in config.params
), "ImageFeatureEncoder require 'in_dim' param in config"
params = config.params
if encoder_type == "default" or encoder_type == "identity":
self.module = Identity()
self.module.in_dim = params.in_dim
self.module.out_dim = params.in_dim
elif encoder_type == "projection":
if "module" not in params:
params = deepcopy(params)
params.module = "linear"
self.module = ProjectionEmbedding(**params)
elif encoder_type == "finetune_faster_rcnn_fpn_fc7":
self.module = FinetuneFasterRcnnFpnFc7(params)
else:
raise NotImplementedError("Unknown Image Encoder: %s" % encoder_type)
self.out_dim = self.module.out_dim
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
@registry.register_encoder("finetune_faster_rcnn_fpn_fc7")
class FinetuneFasterRcnnFpnFc7(ImageFeatureEncoder):
@dataclass
class Config(ImageFeatureEncoder.Config):
name: str = "finetune_faster_rcnn_fpn_fc7"
in_dim: int = MISSING
weights_file: str = "fc7_w.pkl"
bias_file: str = "fc7_b.pkl"
model_data_dir: str = MISSING
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
model_data_dir = get_absolute_path(config.model_data_dir)
if not os.path.isabs(config.weights_file):
weights_file = os.path.join(model_data_dir, config.weights_file)
if not os.path.isabs(config.bias_file):
bias_file = os.path.join(model_data_dir, config.bias_file)
if not PathManager.exists(bias_file) or not PathManager.exists(weights_file):
download_path = download_pretrained_model("detectron.vmb_weights")
weights_file = get_absolute_path(os.path.join(download_path, "fc7_w.pkl"))
bias_file = get_absolute_path(os.path.join(download_path, "fc7_b.pkl"))
with PathManager.open(weights_file, "rb") as w:
weights = pickle.load(w)
with PathManager.open(bias_file, "rb") as b:
bias = pickle.load(b)
out_dim = bias.shape[0]
self.lc = nn.Linear(config.in_dim, out_dim)
self.lc.weight.data.copy_(torch.from_numpy(weights))
self.lc.bias.data.copy_(torch.from_numpy(bias))
self.out_dim = out_dim
def _load_from_state_dict(
self,
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
old_prefix = prefix + "module."
for k in list(state_dict.keys()):
if k.startswith(old_prefix):
new_k = k.replace(old_prefix, prefix)
state_dict[new_k] = state_dict.pop(k)
super()._load_from_state_dict(
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
)
def forward(self, image):
i2 = self.lc(image)
i3 = nn.functional.relu(i2)
return i3
@registry.register_encoder("identity")
class IdentityEncoder(Encoder):
@dataclass
class Config(Encoder.Config):
name: str = "identity"
# Random in_dim if not specified
in_dim: int = 100
def __init__(self, config: Config):
super().__init__()
self.module = nn.Identity()
self.in_dim = config.in_dim
self.out_dim = config.in_dim
def forward(self, x):
return self.module(x)
class ImageEncoderTypes(Enum):
default = "default"
identity = "identity"
torchvision_resnet = "torchvision_resnet"
resnet152 = "resnet152"
detectron2_resnet = "detectron2_resnet"
class ImageEncoderFactory(EncoderFactory):
@dataclass
class Config(EncoderFactory.Config):
type: ImageEncoderTypes = MISSING
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
self._type = config.type
if isinstance(self._type, ImageEncoderTypes):
self._type = self._type.value
params = config.params
if self._type == "default" or self._type == "identity":
self.module = nn.Identity()
self.module.out_dim = params.in_dim
elif self._type == "resnet152":
self.module = ResNet152ImageEncoder(params)
elif self._type == "torchvision_resnet":
self.module = TorchvisionResNetImageEncoder(params)
elif self._type == "detectron2_resnet":
self.module = Detectron2ResnetImageEncoder(params)
else:
raise NotImplementedError("Unknown Image Encoder: %s" % self._type)
@property
def out_dim(self):
return self.module.out_dim
def forward(self, image):
return self.module(image)
# Taken from facebookresearch/mmbt with some modifications
@registry.register_encoder("resnet152")
class ResNet152ImageEncoder(Encoder):
@dataclass
class Config(Encoder.Config):
name: str = "resnet152"
pretrained: bool = True
# "avg" or "adaptive"
pool_type: str = "avg"
num_output_features: int = 1
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
self.config = config
model = torchvision.models.resnet152(pretrained=config.get("pretrained", True))
modules = list(model.children())[:-2]
self.model = nn.Sequential(*modules)
pool_func = (
nn.AdaptiveAvgPool2d if config.pool_type == "avg" else nn.AdaptiveMaxPool2d
)
# -1 will keep the original feature size
if config.num_output_features == -1:
self.pool = nn.Identity()
elif config.num_output_features in [1, 2, 3, 5, 7]:
self.pool = pool_func((config.num_output_features, 1))
elif config.num_output_features == 4:
self.pool = pool_func((2, 2))
elif config.num_output_features == 6:
self.pool = pool_func((3, 2))
elif config.num_output_features == 8:
self.pool = pool_func((4, 2))
elif config.num_output_features == 9:
self.pool = pool_func((3, 3))
self.out_dim = 2048
def forward(self, x):
# Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
out = self.pool(self.model(x))
out = torch.flatten(out, start_dim=2)
out = out.transpose(1, 2).contiguous()
return out # BxNx2048
@registry.register_encoder("torchvision_resnet")
class TorchvisionResNetImageEncoder(Encoder):
@dataclass
class Config(Encoder.Config):
name: str = "resnet50"
pretrained: bool = False
zero_init_residual: bool = True
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
self.config = config
model = getattr(torchvision.models, config.name)(
pretrained=config.pretrained, zero_init_residual=config.zero_init_residual
)
# Set avgpool and fc layers in torchvision to Identity.
model.avgpool = Identity()
model.fc = Identity()
self.model = model
self.out_dim = 2048
def forward(self, x):
# B x 3 x 224 x 224 -> B x 2048 x 7 x 7
out = self.model(x)
return out
@registry.register_encoder("detectron2_resnet")
class Detectron2ResnetImageEncoder(Encoder):
@dataclass
class Config(Encoder.Config):
name: str = "detectron2_resnet"
pretrained: bool = True
pretrained_path: str = None
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
self.config = config
pretrained = config.get("pretrained", False)
pretrained_path = config.get("pretrained_path", None)
self.resnet = build_resnet_backbone(config, ShapeSpec(channels=3))
if pretrained:
state_dict = torch.hub.load_state_dict_from_url(
pretrained_path, progress=False
)
new_state_dict = OrderedDict()
replace_layer = {"backbone.": ""}
for key, value in state_dict["model"].items():
new_key = re.sub(
r"(backbone\.)", lambda x: replace_layer[x.groups()[0]], key
)
new_state_dict[new_key] = value
self.resnet.load_state_dict(new_state_dict, strict=False)
self.out_dim = 2048
def forward(self, x):
x = self.resnet(x)
return x["res5"]
class TextEncoderTypes(Enum):
identity = "identity"
transformer = "transformer"
embedding = "embedding"
class TextEncoderFactory(EncoderFactory):
@dataclass
class Config(EncoderFactory.Config):
# identity, transformer or embedding as of now
type: TextEncoderTypes = MISSING
params: Encoder.Config = MISSING
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
self._type = config.type
if isinstance(self._type, TextEncoderTypes):
self._type = self._type.value
if self._type == "identity":
self.module = nn.Identity()
elif self._type == "transformer":
self._module = TransformerEncoder(config.params)
self.module = self._module.module
elif self._type == "embedding":
self.module = TextEmbeddingEncoder(config.params)
else:
raise NotImplementedError(f"Unknown Text Encoder {self._type}")
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
@registry.register_encoder("text_embedding")
class TextEmbeddingEncoder(Encoder):
@dataclass
class Config(Encoder.Config):
name: str = "text_embedding"
operator: str = MISSING
# Keeping this Any for now as this
# needs a separate refactor PR.
embedding_params: Any = MISSING
def __init__(self, config: Config):
super().__init__()
self._operator = config.operator
self._embedding_params = config.embedding_params
self.module = TextEmbedding(
self._embedding_params.type, **self._embedding_params.params
)
def forward(self, x):
x = self.module(x)
if self._operator == "sum":
x = x.sum(dim=1)
elif self._operator == "concat":
x = torch.cat(x, dim=1)
elif self._operator == "mul":
x = torch.prod(x, dim=1)
return x.squeeze()
@registry.register_encoder("transformer")
class TransformerEncoder(Encoder):
@dataclass
class Config(Encoder.Config):
name: str = "transformer"
num_segments: int = 2
bert_model_name: str = "bert-base-uncased"
# Options below can be overridden to update the bert configuration used
# to initialize the bert encoder. If some option is missing or
# if you are using an encoder different then BERT, add extra parameters
# by inheriting and extending this config
# Those options will automatically override the options for your transformer
# encoder's configuration. For e.g. vocab_size is missing here, just add
# vocab_size: x to update the size of the vocabulary with which encoder is
# initialized. If you update the default values, the transformer you
# will get will be initialized from scratch.
hidden_size: int = 768
num_hidden_layers: int = 12
num_attention_heads: int = 12
output_attentions: bool = False
output_hidden_states: bool = False
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
self.config = config
hf_params = {"config": self._build_encoder_config(config)}
# For BERT models, initialize using Jit version
if self.config.bert_model_name.startswith("bert-"):
self.module = BertModelJit.from_pretrained(
self.config.bert_model_name, **hf_params
)
else:
self.module = AutoModel.from_pretrained(
self.config.bert_model_name, **hf_params
)
self.embeddings = self.module.embeddings
self.original_config = self.config
self.config = self.module.config
self._init_segment_embeddings()
def _init_segment_embeddings(self):
if self.original_config.get("num_segments", None):
num_segments = self.original_config.num_segments
if hasattr(self.embeddings, "token_type_embeddings"):
new_embeds = nn.Embedding(num_segments, self.config.hidden_size)
new_embeds.weight.data[:2].copy_(
self.embeddings.token_type_embeddings.weight
)
for idx in range(2, num_segments - 1):
new_embeds.weight.data[idx].copy_(
self.embeddings.token_type_embeddings.weight.data.mean(dim=0)
)
self.embeddings.token_type_embeddings = new_embeds
def _build_encoder_config(self, config: Config):
return AutoConfig.from_pretrained(
self.config.bert_model_name, **OmegaConf.to_container(self.config)
)
def forward(self, *args, **kwargs):
# Only return pooled output
return self.module(*args, **kwargs)[1]
class MultiModalEncoderBase(Encoder):
__jit_unused_properties__ = ["encoder_config"]
@dataclass
class Config(Encoder.Config):
# This actually is Union[ImageEncoderConfig, ImageFeatureEncoderConfig]
modal_encoder: EncoderFactory.Config = ImageEncoderFactory.Config(
type=ImageEncoderTypes.resnet152, params=ResNet152ImageEncoder.Config()
)
text_encoder: EncoderFactory.Config = TextEncoderFactory.Config(
type=TextEncoderTypes.transformer, params=TransformerEncoder.Config()
)
direct_features_input: bool = False
modal_hidden_size: int = 2048
text_hidden_size: int = 768
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
self.config = config
self._modal_encoder_config = self.config.get("modal_encoder", None)
self._is_direct_features_input = self.config.get("direct_features_input", False)
self.build()
self.modal_hidden_size = self.config.get("modal_hidden_size", None)
self.text_hidden_size = self.config.get("text_hidden_size", None)
def build(self):
encoders = self._build_encoders(self.config)
self.text_encoder, self.modal_encoder = encoders[0], encoders[1]
self._encoder_config = None
if self.text_encoder:
self._encoder_config = self.text_encoder.config
@property
def encoder_config(self):
return self._encoder_config
def _build_encoders(self, config):
text_encoder = None
if config.get("text_encoder", None):
text_encoder = build_text_encoder(config.text_encoder)
modal_encoder = None
if config.get("modal_encoder", None):
modal_encoder = self._build_modal_encoder(config.modal_encoder)
return (text_encoder, modal_encoder)
def _build_modal_encoder(self, config):
return build_image_encoder(
config, direct_features=self._is_direct_features_input
)
| [
"torch.nn.Linear",
"torch.nn.Identity",
"torch.cat",
"torch.prod",
"torch.flatten",
"torch.nn.Sequential",
"torch.from_numpy",
"torch.hub.load_state_dict_from_url",
"torch.nn.functional.relu",
"torch.nn.Embedding"
] | 1.6.0 | madian9/mmf | 6db9048c848a178872d1aa1a14ee0009de703750 |
0.6 | from torch.optim.lr_scheduler import _LRScheduler
from torch.optim.lr_scheduler import ReduceLROnPlateau
__all__ = ['GradualWarmupScheduler']
class GradualWarmupScheduler(_LRScheduler):
""" Gradually warm-up(increasing) learning rate in optimizer.
Proposed in 'Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour'.
Args:
optimizer (Optimizer): Wrapped optimizer.
multiplier: target learning rate = base lr * multiplier if multiplier > 1.0. if multiplier = 1.0, lr starts from 0 and ends up with the base_lr.
total_epoch: target learning rate is reached at total_epoch, gradually
after_scheduler: after target_epoch, use this scheduler(eg. ReduceLROnPlateau)
"""
def __init__(self, optimizer, multiplier, total_epoch, after_scheduler=None):
self.multiplier = multiplier
if self.multiplier < 1.:
raise ValueError('multiplier should be greater than or equal to 1.')
self.total_epoch = total_epoch
self.after_scheduler = after_scheduler
self.finished = False
super(GradualWarmupScheduler, self).__init__(optimizer)
def get_lr(self):
if self.last_epoch > self.total_epoch:
if self.after_scheduler:
if not self.finished:
self.after_scheduler.base_lrs = [base_lr * self.multiplier for base_lr in self.base_lrs]
self.finished = True
return self.after_scheduler.get_last_lr()
return [base_lr * self.multiplier for base_lr in self.base_lrs]
if self.multiplier == 1.0:
return [base_lr * (float(self.last_epoch) / self.total_epoch) for base_lr in self.base_lrs]
else:
return [base_lr * ((self.multiplier - 1.) * self.last_epoch / self.total_epoch + 1.) for base_lr in self.base_lrs]
def step_ReduceLROnPlateau(self, metrics, epoch=None):
if epoch is None:
epoch = self.last_epoch + 1
self.last_epoch = epoch if epoch != 0 else 1 # ReduceLROnPlateau is called at the end of epoch, whereas others are called at beginning
if self.last_epoch <= self.total_epoch:
warmup_lr = [base_lr * ((self.multiplier - 1.) * self.last_epoch / self.total_epoch + 1.) for base_lr in self.base_lrs]
for param_group, lr in zip(self.optimizer.param_groups, warmup_lr):
param_group['lr'] = lr
else:
if epoch is None:
self.after_scheduler.step(metrics, None)
else:
self.after_scheduler.step(metrics, epoch - self.total_epoch)
def step(self, epoch=None, metrics=None):
if type(self.after_scheduler) != ReduceLROnPlateau:
if self.finished and self.after_scheduler:
if epoch is None:
self.after_scheduler.step(None)
else:
self.after_scheduler.step(epoch - self.total_epoch)
self._last_lr = self.after_scheduler.get_last_lr()
else:
return super(GradualWarmupScheduler, self).step(epoch)
else:
self.step_ReduceLROnPlateau(metrics, epoch)
if __name__ == '__main__':
# https://github.com/ildoonet/pytorch-gradual-warmup-lr
import torch
from torch.optim.lr_scheduler import StepLR, ExponentialLR
from torch.optim.sgd import SGD
model = [torch.nn.Parameter(torch.randn(2, 2, requires_grad=True))]
optim = SGD(model, 0.1)
# scheduler_warmup is chained with schduler_steplr
scheduler_steplr = StepLR(optim, step_size=10, gamma=0.1)
scheduler_warmup = GradualWarmupScheduler(optim, multiplier=1, total_epoch=5, after_scheduler=scheduler_steplr)
# this zero gradient update is needed to avoid a warning message, issue #8.
optim.zero_grad()
optim.step()
for epoch in range(1, 20):
scheduler_warmup.step(epoch)
print(epoch, optim.param_groups[0]['lr'])
optim.step() # backward pass (update network)
| [
"torch.optim.sgd.SGD",
"torch.optim.lr_scheduler.StepLR",
"torch.randn"
] | 0.6.3 | mesuga-reymond/change_detection.pytorch | cc9a4aec59e13fb45f22b9d9d3f4c735bd257892 |
1.0 | import torch
from apex import amp
from math import ceil
import random
import PIL
from tqdm import tqdm
#torch.multiprocessing.set_start_method('spawn', force=True)
import os,sys,inspect
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
urbangan_dir = os.path.dirname(current_dir)
sys.path.insert(0, urbangan_dir)
from tools.options import Options
from tools.engine import Engine
from tools.logger import Logger
from tools.utils import get_confusion_matrix
from models.segmentor.models.segmentor import Segmentor
from models.segmentor_plus.models.segmentor import Segmentor as SegmentorPlus
from inplace_abn import InPlaceABN, InPlaceABNSync
from data.base_dataset import get_transform
#torch.autograd.set_detect_anomaly(True)
class SegmentorTrainer:
def __init__(self, opt):
self.opt = opt["segmentor"]
self.tgt_dataset_opt = opt["extra_dataset"]
self.all_opt = opt
if self.opt.advent or self.opt.duo:
assert self.tgt_dataset_opt.dataset is not None
print(f"Target pairs for duo/advent training are loaded from [{self.tgt_dataset_opt.dataset}] dataset")
def step_model(self, data, global_iteration, log):
# try:
# import pdb; pdb.set_trace()
if self.opt.advent or self.opt.duo:
tgt_data = self.next_tgt_batch()
self.opt_s.zero_grad()
if self.opt.advent:
loss, pred_data, delta = self.segmentor(data, tgt_data, global_iteration=global_iteration, log=log, mode='segmentor_advent', hard=False)
else:
loss, _, delta = self.segmentor(data, global_iteration=global_iteration, log=log, mode='segmentor', hard=False)
loss = self.engine.all_reduce_tensor(loss)
if self.opt.use_amp:
with amp.scale_loss(loss, self.opt_s) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
if self.opt.duo:
loss, _, _ = self.segmentor(tgt_data, global_iteration=global_iteration, log=log, mode='segmentor', hard=False, suffix="_for_duo")
loss = self.engine.all_reduce_tensor(loss)
if self.opt.use_amp:
with amp.scale_loss(loss, self.opt_s) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
self.opt_s.step()
if self.opt.advent:
self.opt_d.zero_grad()
loss = self.segmentor({}, pred_data=pred_data, global_iteration=global_iteration, log=log, mode='discriminator_advent')
loss = self.engine.all_reduce_tensor(loss)
if self.opt.use_amp:
with amp.scale_loss(loss, self.opt_d) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
self.opt_d.step()
# record delta for synthetic dataset sampler
if self.opt.synthetic_dataset and not self.opt.semi:
self.train_dataset.set_batch_delta(delta)
# print("allocated", torch.cuda.memory_allocated())
# print("cached", torch.cuda.memory_cached())
# torch.cuda.empty_cache()
# import pdb; pdb.set_trace()
# except:
# print("error at iteration", global_iteration)
# import pdb; pdb.set_trace()
def slide_pred(self, data, window_size):
img = data["img"]
b, c, h, w = img.shape
pred_sem_seg = torch.zeros((b, self.opt.num_semantics, h, w)).cuda()
pred_sem_count = torch.zeros((h, w)).cuda()
min_overlap = 1 / 3
win_h, win_w = window_size
win_rows = int(ceil((h - win_h) / (win_h * (1 - min_overlap)))) + 1
win_cols = int(ceil((w - win_w) / (win_w * (1 - min_overlap)))) + 1
overlap_h = 1 - (h - win_h) / (win_h * (win_rows - 1)) if win_rows > 1 else 0
overlap_w = 1 - (w - win_w) / (win_w * (win_cols - 1)) if win_cols > 1 else 0
stride_h = (1 - overlap_h) * win_h
stride_w = (1 - overlap_w) * win_w
for row in range(win_rows):
for col in range(win_cols):
x1 = int(col * stride_w)
y1 = int(row * stride_h)
x2 = x1 + win_w
y2 = y1 + win_h
slide_data = {"img": img[:, :, y1:y2, x1:x2]}
pred = self.segmentor(slide_data, mode='inference', hard=False)
pred_sem_seg[:, :, y1:y2, x1:x2] = pred["sem_seg"]
pred_sem_count[y1:y2, x1:x2] += 1
pred_sem_seg /= pred_sem_count
return {"sem_seg": pred_sem_seg}
def eval_model(self, data, global_iteration, log):
if self.opt.slide_eval:
pred_seg = self.slide_pred(data, window_size=self.opt.fixed_crop)
if self.opt.multi_scale_eval:
if self.opt.aspect_ratio > 1:
size = [self.opt.fixed_crop[0], int(self.opt.fixed_crop[0] * self.opt.aspect_ratio)]
else:
size = [int(self.opt.fixed_crop[1] / self.opt.aspect_ratio), self.opt.fixed_crop[1]]
small_data = {"img": torch.nn.functional.interpolate(data["img"], size=size, mode="bilinear")}
pred_small_seg = self.slide_pred(small_data, window_size=self.opt.fixed_crop)
resized_seg = torch.nn.functional.interpolate(pred_small_seg["sem_seg"], size=data["img"].shape[-2:], mode="bilinear")
pred_seg["sem_seg"] = (pred_seg["sem_seg"] + resized_seg) / 2
else:
pred_seg = self.segmentor(data, global_iteration=global_iteration, mode='inference', hard=False)
pred_sem_seg = pred_seg["sem_seg"]
sem_index_pred = pred_sem_seg.max(dim=1, keepdim=True)[1]
if "flat_seg" in data and not 0 in data["flat_seg"].size():
real_sem_seg = data["flat_seg"].unsqueeze(1).cuda()
sem_index_real = data["flat_seg"].unsqueeze(1).cuda()
else:
real_sem_seg = data["sem_seg"].cuda()
sem_index_real = real_sem_seg.max(dim=1, keepdim=True)[1]
if log:
self.segmentor_on_one_gpu.logger.log_img("segmentor/val/img", data["img"][:16].cpu(), 4, global_iteration, normalize=True, range=(-1, 1))
self.segmentor_on_one_gpu.logger.log_semantic_seg("segmentor/val/real", real_sem_seg[:16].cpu(), 4, global_iteration)
self.segmentor_on_one_gpu.logger.log_semantic_seg("segmentor/val/pred", pred_sem_seg[:16].cpu(), 4, global_iteration)
confusion_matrix = get_confusion_matrix(sem_index_real, sem_index_pred, self.opt.num_semantics)
return confusion_matrix
def update_learning_rate(self, global_iteration, min_lr=1e-6):
total_iterations = int(self.opt.niter * self.dataset_size / self.opt.batch_size)
lr = max(self.opt.lr * ((1 - float(global_iteration) / total_iterations) ** (self.opt.power)), min_lr)
if self.opt.plus:
self.opt_s.param_groups[0]['lr'] = 0.1 * lr
self.opt_s.param_groups[1]['lr'] = lr
else:
self.opt_s.param_groups[0]['lr'] = lr
if self.opt.advent:
advent_lr = max(self.opt.advent_lr * ((1 - float(global_iteration) / total_iterations) ** (self.opt.power)), min_lr)
self.opt_d.param_groups[0]['lr'] = advent_lr
return lr
def next_tgt_batch(self):
try:
return next(self.loader_iter_tgt)
except StopIteration:
self.loader_iter_tgt = iter(self.target_dataloader)
return next(self.loader_iter_tgt)
def preprocess_data(self, data):
if self.opt.sample_fixed_crop is not None:
# new_img = []
# new_sem = []
h = int(self.opt.dim)
w = int(self.opt.dim * self.opt.aspect_ratio)
h_crop = self.opt.sample_fixed_crop[0]
w_crop = self.opt.sample_fixed_crop[1]
max_zoom = 1. # self.opt.max_zoom
zoom = self.opt.min_zoom + random.random() * (max_zoom - self.opt.min_zoom)
h_scaled = int(h * zoom)
w_scaled = int(w * zoom)
scale = (h_scaled, w_scaled)
assert h_scaled - h_crop >= 0
assert w_scaled - w_crop >= 0
top_crop = int(random.random() * (h_scaled - h_crop))
left_crop = int(random.random() * (w_scaled - w_crop))
data["img"] = torch.nn.functional.interpolate(data["img"], size=scale, mode="bilinear")
data["img"] = data["img"][:, :, top_crop:top_crop + h_crop, left_crop:left_crop + w_crop]
data["sem_seg"] = torch.nn.functional.interpolate(data["sem_seg"], size=scale, mode="nearest")
data["sem_seg"] = data["sem_seg"][:, :, top_crop:top_crop + h_crop, left_crop:left_crop + w_crop]
return data
if self.opt.sample_random_crop:
h = int(self.opt.dim)
w = int(self.opt.dim * self.opt.aspect_ratio)
max_zoom = self.opt.max_zoom
zoom = self.opt.min_zoom + random.random() * (max_zoom - self.opt.min_zoom)
h_scaled = int(h * zoom)
w_scaled = int(w * zoom)
scale = (h_scaled, w_scaled)
assert h_scaled - h >= 0
assert w_scaled - w >= 0
top_crop = int(random.random() * (h_scaled - h))
left_crop = int(random.random() * (w_scaled - w))
data["img"] = torch.nn.functional.interpolate(data["img"], size=scale, mode="bilinear")
data["img"] = data["img"][:, :, top_crop:top_crop + h, left_crop:left_crop + w]
data["sem_seg"] = torch.nn.functional.interpolate(data["sem_seg"], size=scale, mode="nearest")
data["sem_seg"] = data["sem_seg"][:, :, top_crop:top_crop + h, left_crop:left_crop + w]
return data
else:
return data
def run(self):
with Engine(self.opt) as engine:
self.engine = engine
self.train_dataset = engine.create_dataset(self.all_opt, load_seg=True, load_img=True, is_synthetic=self.opt.synthetic_dataset, is_semi=self.opt.semi)
self.train_dataloader, self.datasampler = engine.create_dataloader(self.train_dataset, self.opt.batch_size, self.opt.num_workers, is_train=True, is_synthetic=self.opt.synthetic_dataset)
if self.opt.advent or self.opt.duo:
self.target_dataset = engine.create_dataset(self.tgt_dataset_opt, load_seg=True, load_img=True)
self.target_dataloader, self.target_datasampler = engine.create_dataloader(self.target_dataset, self.opt.batch_size, self.opt.num_workers, True)
self.loader_iter_tgt = iter(self.target_dataloader)
eval_opt = self.opt if self.opt.eval_dataset == "base" else self.tgt_dataset_opt
self.valid_dataset = engine.create_dataset(eval_opt, load_seg=True, load_img=True, phase="valid")
eval_batch_size = self.opt.force_eval_batch_size if self.opt.force_eval_batch_size is not None else self.opt.batch_size
if not self.opt.no_eval:
self.valid_dataloader, _ = engine.create_dataloader(self.valid_dataset, eval_batch_size, self.opt.num_workers, False)
is_main = self.opt.local_rank == 0
logger = Logger(self.opt) if is_main else None
if self.opt.plus:
self.segmentor_on_one_gpu = SegmentorPlus(self.opt, is_train=True, is_main=is_main, logger=logger, distributed=self.engine.distributed)
else:
self.segmentor_on_one_gpu = Segmentor(self.opt, is_train=True, is_main=is_main, logger=logger, distributed=self.engine.distributed)
self.opt_s = self.segmentor_on_one_gpu.opt_s
self.opt_d = self.segmentor_on_one_gpu.opt_d
if self.opt.use_amp:
optimizer = [self.opt_s, self.opt_d] if self.opt.advent else self.opt_s
self.segmentor_on_one_gpu, optimizer = amp.initialize(self.segmentor_on_one_gpu, optimizer,
opt_level=self.opt.amp_level)
self.segmentor_on_one_gpu.apply(lambda x: cast_running_stats(x, self.engine.distributed))
self.segmentor = engine.data_parallel(self.segmentor_on_one_gpu)
self.segmentor.train()
if self.opt.cont_train:
start_epoch = self.opt.which_iter
else:
start_epoch = 0
end_epoch = self.opt.niter
self.dataset_size = len(self.train_dataset) * self.opt.batch_size if self.opt.synthetic_dataset else len(self.train_dataset)
global_iteration = start_epoch * int(self.dataset_size / self.opt.batch_size)
for epoch in range(start_epoch, end_epoch):
if self.engine.distributed:
self.datasampler.set_epoch(epoch)
if self.opt.advent:
self.target_datasampler.set_epoch(epoch)
for i, data_i in enumerate(self.train_dataloader):
global_iteration += 1
log = global_iteration % self.opt.log_freq == 0 and is_main
data_i = self.preprocess_data(data_i)
self.step_model(data_i, global_iteration, log=log)
lr = self.update_learning_rate(global_iteration)
if log:
self.segmentor_on_one_gpu.logger.log_scalar("segmentor/learning_rate", lr, global_iteration)
print(f"[Ep{epoch}/{end_epoch}] Iteration {i + 1:05d}/{int(self.dataset_size / self.opt.batch_size):05d}")
# update sampling weights of synthetic dataset
if self.opt.synthetic_dataset and not self.opt.semi:
self.train_dataset.update_sampler(logger=logger, log=is_main, global_iteration=global_iteration)
if self.opt.save_freq > 0 and epoch % self.opt.save_freq == 0 and is_main:
self.segmentor_on_one_gpu.save_model(epoch, latest=False)
if self.opt.save_latest_freq > 0 and epoch % self.opt.save_latest_freq == 0 and is_main:
self.segmentor_on_one_gpu.save_model(epoch, latest=True)
if epoch % self.opt.eval_freq == 0 and not self.opt.no_eval:
self.segmentor.eval()
with torch.no_grad():
confusion_matrix = torch.zeros((self.opt.num_semantics, self.opt.num_semantics)).cuda()
for i, data_i in tqdm(enumerate(self.valid_dataloader), desc='eval', total=len(self.valid_dataloader)):
confusion_matrix += self.eval_model(data_i, global_iteration, log=i == 0)
confusion_matrix = self.engine.all_reduce_tensor(confusion_matrix, norm=False)
pos = confusion_matrix.sum(dim=1)
res = confusion_matrix.sum(dim=0)
tp = torch.diag(confusion_matrix)
iou = (tp / torch.max(torch.Tensor([1.0]).to(pos.get_device()), pos + res - tp))
mean_iou = iou.mean()
pos_eval = pos[self.opt.eval_idx]
res_eval = confusion_matrix[self.opt.eval_idx].sum(dim=0)[self.opt.eval_idx]
tp_eval = tp[self.opt.eval_idx]
iou_eval = (tp_eval / torch.max(torch.Tensor([1.0]).to(pos.get_device()), pos_eval + res_eval - tp_eval))
mean_iou_eval = iou_eval.mean()
if is_main:
self.segmentor_on_one_gpu.logger.log_scalar("segmentor/val/mean_iou", mean_iou, global_iteration)
self.segmentor_on_one_gpu.logger.log_scalar("segmentor/val/mean_iou_eval", mean_iou_eval, global_iteration)
for i in range(len(iou)):
self.segmentor_on_one_gpu.logger.log_scalar(f"segmentor/val/iou/{self.opt.semantic_labels[i].replace(' ', '_')}", iou[i], global_iteration)
self.segmentor_on_one_gpu.logger.log_confusion_matrix("segmentor/val/confusion_matrix", confusion_matrix.cpu(), global_iteration)
self.segmentor.train()
print('Training was successfully finished.')
def cast_running_stats(m, distributed):
ABN = InPlaceABNSync if distributed else InPlaceABN
if isinstance(m, ABN):
m.running_mean = m.running_mean.float()
m.running_var = m.running_var.float()
if __name__ == "__main__":
opt = Options().parse(load_segmentor=True, load_seg_generator=True, load_img_generator=True, load_extra_dataset=True, save=True)
SegmentorTrainer(opt).run()
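# --- Added sketch (illustrative, not part of the original trainer) --------------------
# A hedged, self-contained illustration of the mIoU computation used in `run` above:
# row sums of the confusion matrix are ground-truth pixel counts, column sums are
# predicted pixel counts, the diagonal holds the true positives, and IoU is tp / union.
# The 3x3 matrix below is made up purely for illustration.
def _iou_from_confusion_sketch():
    confusion = torch.tensor([[50., 2., 0.],
                              [3., 40., 5.],
                              [0., 1., 20.]])
    pos = confusion.sum(dim=1)                      # ground-truth pixels per class
    res = confusion.sum(dim=0)                      # predicted pixels per class
    tp = torch.diag(confusion)                      # correctly classified pixels
    iou = tp / torch.max(torch.Tensor([1.0]), pos + res - tp)
    return iou, iou.mean()                          # per-class IoU and mIoU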
| [
"torch.zeros",
"torch.nn.functional.interpolate",
"torch.no_grad",
"torch.diag",
"torch.Tensor"
] | 1.0.0 | valeoai/SemanticPalette | a1b02a384c09881d6f1ca1a0c0ebfd87278c3d7d |
1.0 | import torch
import matplotlib.pyplot as plt
from matplotlib import cm
from tqdm import tqdm
import imageio
import numpy as np
import pandas as pd
from copy import deepcopy
import random
from glob import glob
import math
import os,sys,inspect
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
urbangan_dir = os.path.dirname(current_dir)
sys.path.insert(0, urbangan_dir)
from tools.options import Options
from tools.engine import Engine
from tools.logger import Logger
from data import create_dataset
from tools.utils import create_colormap, color_transfer, color_spread, save_image
from tools.cond_sampler import CondSampler
from models.seg_img_model import SegImgModel
from models.seg_generator.modules.instance_refiner import InstanceRefiner
class Visualizer:
def __init__(self, opt):
self.opt = opt["base"]
self.seg_opt = opt["seg_generator"]
self.img_opt = opt["img_generator"]
self.cond_sampler = CondSampler(opt)
self.batch_size = self.opt.batch_size
self.vis_steps = self.opt.vis_steps
self.ins_refiner = InstanceRefiner(self.opt)
def save_full_res(self, savefile, real_cond, fake_cond, imgs, real_ins=None, fake_ins=None):
save_dir = os.path.basename(savefile).split('.')[0]
save_folder = os.path.join(self.opt.log_path, save_dir)
print("Saving imgs at full res in", save_folder)
os.mkdir(save_folder)
# save img to png
for i, img_list in enumerate(imgs):
for j, img in enumerate(img_list):
img_path = os.path.join(save_folder, f"img_{i}_{j}.png")
np_img = (img.permute(1,2,0).numpy() * 255).astype(np.uint8)
save_image(np_img, img_path)
# save prop to csv
csv_file = os.path.join(save_folder, "prop.csv")
excel_file = os.path.join(save_folder, "prop.xlsx")
columns = ["img_id", "prop"] + self.opt.semantic_labels
if real_ins is not None:
columns += ["ins_" + self.opt.semantic_labels[k] for k in self.opt.things_idx]
real_ins = real_ins.numpy()
fake_ins = fake_ins.numpy()
df = pd.DataFrame(columns=columns)
real_cond = real_cond.numpy()
fake_cond = fake_cond.numpy()
for i in range(len(real_cond)):
real_raw = {"img_id": str(i), "prop": "target"}
gen_raw = {"img_id": str(i), "prop": "generated"}
for j, label in enumerate(self.opt.semantic_labels):
real_raw[label] = real_cond[i][j]
gen_raw[label] = fake_cond[i][j]
if real_ins is not None:
for j, idx in enumerate(self.opt.things_idx):
label = "ins_" + self.opt.semantic_labels[idx]
real_raw[label] = real_ins[i][j]
gen_raw[label] = fake_ins[i][j]
df = df.append(real_raw, ignore_index=True)
df = df.append(gen_raw, ignore_index=True)
df.to_csv(csv_file, sep=",", index=False)
df.to_excel(excel_file)
def fixedsem_manipulation(self, savefile):
data = self.next_batch()
data["z_seg"] = torch.randn(self.vis_steps, self.seg_opt.latent_dim)
data["sem_cond"] = data["sem_cond"][0].repeat(self.vis_steps, 1)
fake_data = []
batch_num = self.vis_steps // self.batch_size
for i in range(batch_num):
start = i * self.batch_size
end = min(start + self.batch_size, self.vis_steps)
batch_data = {k: v[start:end] if v.size(0) > 0 else v for k, v in data.items()}
fake_data.append(self.seg_img_model(data=batch_data, mode='inference', log=True, as_list=False))
fake_data = {k: torch.cat([fake[k] for fake in fake_data], dim=0) for k in fake_data[0].keys()}
fake_sem_seg = fake_data["sem_seg"]
fake_sem_cond = torch.mean(fake_sem_seg, dim=(2, 3)).cpu()
fake_pred = torch.argmax(fake_sem_seg, dim=1, keepdim=True).cpu()
fake_sem_img = (color_transfer(fake_pred, self.colormap) + 1) / 2
fake_img = (fake_data["img"].cpu() + 1) / 2
real_sem_seg = data["sem_seg"]
real_sem_cond = data["sem_cond"].cpu()
real_pred = torch.argmax(real_sem_seg, dim=1, keepdim=True).cpu()
real_sem_img = (color_transfer(real_pred, self.colormap) + 1) / 2
real_img = (data["img"].cpu() + 1) / 2
if self.opt.save_full_res:
self.save_full_res(savefile, real_sem_cond, fake_sem_cond, [real_img,
real_sem_img,
fake_img,
fake_sem_img])
def scrop_manipulation(self, savefile):
data = self.next_batch()
data["z_seg"] = torch.randn(self.vis_steps, self.seg_opt.latent_dim)
fake_data = []
batch_num = self.vis_steps // self.batch_size
for i in range(batch_num):
start = i * self.batch_size
end = min(start + self.batch_size, self.vis_steps)
batch_data = {k: v[start:end] if v.size(0) > 0 else v for k, v in data.items()}
fake_data.append(self.seg_img_model(data=batch_data, mode='inference', log=True, as_list=False))
fake_data = {k: torch.cat([fake[k] for fake in fake_data], dim=0) for k in fake_data[0].keys()}
fake_sem_seg = fake_data["sem_seg"]
fake_sem_cond = torch.mean(fake_sem_seg, dim=(2, 3)).cpu()
fake_pred = torch.argmax(fake_sem_seg, dim=1, keepdim=True).cpu()
fake_raw = self.process_raw_sem(fake_data)
fake_sem_img = (color_transfer(fake_pred, self.colormap) + 1) / 2
fake_img = (fake_data["img"].cpu() + 1) / 2
fake_raw_img = (color_transfer(fake_raw, self.colormap) + 1) / 2
real_sem_seg = data["sem_seg"]
real_sem_cond = torch.mean(real_sem_seg, dim=(2, 3)).cpu()
real_pred = torch.argmax(real_sem_seg, dim=1, keepdim=True).cpu()
real_raw = fake_data["real_cropped"].cpu()
real_raw_pred = torch.argmax(real_raw, dim=1, keepdim=True)
real_raw_pred[torch.max(real_raw, dim=1, keepdim=True)[0] == 1. / self.opt.num_semantics] = -1
real_sem_img = (color_transfer(real_pred, self.colormap) + 1) / 2
real_img = (data["img"].cpu() + 1) / 2
real_raw_img = (color_transfer(real_raw_pred, self.colormap) + 1) / 2
if self.opt.save_full_res:
self.save_full_res(savefile, real_sem_cond, fake_sem_cond, [real_img,
real_sem_img,
real_raw_img,
fake_img,
fake_sem_img,
fake_raw_img])
def latent_interpolation(self, savefile):
z0 = torch.randn(self.seg_opt.latent_dim)
z1 = torch.randn(self.seg_opt.latent_dim)
z = [z0 * i / (self.batch_size - 1) + z1 * (1 - i / (self.batch_size - 1)) for i in range(self.batch_size)]
data = {"z_seg": torch.stack(z)}
c = self.cond_sampler.sample_batch(1)
ins_cond = torch.cat([c["ins_cond"]] * self.batch_size, dim=0)
sem_cond = torch.cat([c["sem_cond"]] * self.batch_size, dim=0)
cond = {"ins_cond": ins_cond, "sem_cond": sem_cond}
data.update(cond)
fake_data = self.seg_img_model(data=data, mode='inference', log=True, as_list=False)
if self.opt.vis_ins:
self.plot_ins_interpolation(fake_data, ins_cond, sem_cond, savefile)
else:
self.plot_sem_interpolation(fake_data, sem_cond, savefile)
def cond_interpolation(self, savefile):
z = torch.randn(self.seg_opt.latent_dim)
data = {"z_seg": torch.stack([z] * self.batch_size)}
c0 = self.cond_sampler.sample_batch(1)
ins0 = c0["ins_cond"]
sem0 = c0["sem_cond"]
c1 = self.cond_sampler.sample_batch(1)
ins1 = c1["ins_cond"]
sem1 = c1["sem_cond"]
ins = [ins0 * i / (self.batch_size - 1) + ins1 * (1 - i / (self.batch_size - 1)) for i in range(self.batch_size)]
sem = [sem0 * i / (self.batch_size - 1) + sem1 * (1 - i / (self.batch_size - 1)) for i in range(self.batch_size)]
ins_cond = torch.cat(ins, dim=0)
sem_cond = torch.cat(sem, dim=0)
cond = {"ins_cond": ins_cond, "sem_cond": sem_cond}
data.update(cond)
fake_data = self.seg_img_model(data=data, mode='inference', log=True, as_list=False)
if self.opt.vis_ins:
self.plot_ins_interpolation(fake_data, ins_cond, sem_cond, savefile)
else:
self.plot_sem_interpolation(fake_data, sem_cond, savefile)
def ins_manipulation(self, idx, sem_min, sem_max, ins_min, ins_max, savefile):
z = torch.randn(self.seg_opt.latent_dim)
data = {"z_seg": torch.stack([z] * self.vis_steps)}
c0 = self.cond_sampler.sample_batch(1)
ins0 = c0["ins_cond"]
sem0 = c0["sem_cond"]
sem0[0, idx] = sem_min
sem0 /= torch.sum(sem0)
sem1 = sem0.clone()
sem1[0, idx] = sem_max
sem1 /= torch.sum(sem1)
thing_idx = self.opt.things_idx.index(idx)
ins0[:, thing_idx] = ins_min
ins1 = ins0.clone()
ins1[:, thing_idx] = ins_max
ins = [ins0 * i / (self.vis_steps - 1) + ins1 * (1 - i / (self.vis_steps - 1)) for i in range(self.vis_steps)]
sem = [sem0 * i / (self.vis_steps - 1) + sem1 * (1 - i / (self.vis_steps - 1)) for i in range(self.vis_steps)]
ins_cond = torch.cat(ins, dim=0)
sem_cond = torch.cat(sem, dim=0)
cond = {"ins_cond": ins_cond, "sem_cond": sem_cond}
data.update(cond)
fake_data = []
batch_num = math.ceil(1. * self.vis_steps / self.batch_size)
for i in range(batch_num):
start = i * self.batch_size
end = min(start + self.batch_size, self.vis_steps)
batch_data = {k: v[start:end] if v.size(0) > 0 else v for k, v in data.items()}
fake_data.append(self.seg_img_model(data=batch_data, mode='inference', log=True, as_list=False))
fake_data = {k: torch.cat([fake[k] for fake in fake_data], dim=0) for k in fake_data[0].keys()}
self.plot_ins_interpolation(fake_data, ins_cond, sem_cond, savefile)
def get_style(self):
if self.opt.vis_random_style:
return self.get_random_style()
else:
return self.get_mean_style()
def get_random_style(self):
obj_dic = {}
for i in range(self.opt.num_semantics):
            if i == 5:  # right eye (class 5) already received the style chosen together with class 4
                continue
style_code_path = random.choice(self.style_codes[i])
style = np.load(style_code_path)
obj_dic[str(i)] = {'ACE': torch.tensor(style).cuda()}
if i == 4: # right and left eyes should have same style
style = np.load(style_code_path.replace("/4/", "/5/"))
obj_dic[str(i + 1)] = {'ACE': torch.tensor(style).cuda()}
return obj_dic
def get_mean_style(self):
obj_dic = {}
for i in range(self.opt.num_semantics):
folder_path = os.path.join(self.opt.extraction_path, "mean_style_code", "mean", str(i))
style_code_path = os.path.join(folder_path, 'ACE.npy')
style = np.load(style_code_path)
obj_dic[str(i)] = {'ACE': torch.tensor(style).cuda()}
return obj_dic
def face_manipulation(self, savefile, bg_to_white=False, mode=None):
data = self.next_batch()
img = data["img"]
sem_seg = data["sem_seg"]
force_mean_idx = []
if bg_to_white:
bg_mask = sem_seg[:, 0] == 1
img.permute(0,2,3,1)[bg_mask] = 1.
data["img"] = img.repeat(self.vis_steps, 1, 1, 1)
index = sem_seg.max(1, keepdim=True)[1]
sem_seg = torch.zeros_like(sem_seg).scatter_(1, index, 1.0)
sem0 = torch.mean(sem_seg.float(), dim=(2, 3))
sem1 = data["sem_cond"]
if mode == "earrings":
if sem0[0, 15] != 0: # if already has earrings, sample new image
self.face_manipulation(savefile, bg_to_white=bg_to_white, mode=mode)
return
if sem0[0, 13] < 0.3: # hair is too short
self.face_manipulation(savefile, bg_to_white=bg_to_white, mode=mode)
return
sem1 = sem0.clone()
sem1[0, 15] = 0.03 # earrings
sem1[0, 13] -= 0.03 # hair
force_mean_idx = [15]
if mode == "skin":
if sem0[0, 13] > 0.08: # too much hair
self.face_manipulation(savefile, bg_to_white=bg_to_white, mode=mode)
return
sem1 = sem0.clone()
sem1[0, 1] += 0.06 # skin
sem1[0, 0] -= 0.06 # bg
if mode == "nose":
if sem0[0, 13] > 0.08: # too much hair
self.face_manipulation(savefile, bg_to_white=bg_to_white, mode=mode)
return
sem1 = sem0.clone()
sem1[0, 1] -= 0.05 # skin
sem1[0, 2] += 0.05 # nose
if mode == "hat":
if sem0[0, 14] != 0: # if already has hat, sample new image
self.face_manipulation(savefile, bg_to_white=bg_to_white, mode=mode)
return
if sem0[0, 13] < 0.3: # hair is too short
self.face_manipulation(savefile, bg_to_white=bg_to_white, mode=mode)
return
sem1 = sem0.clone()
sem1[0, 14] = 0.1 # hat
sem1[0, 13] -= 0.07 # hair
sem1[0, 0] -= 0.03 # bg
force_mean_idx = [14]
if mode == "glasses":
if sem0[0, 3] != 0: # if already has glasses, sample new image
self.face_manipulation(savefile, bg_to_white=bg_to_white, mode=mode)
return
sem1 = sem0.clone()
sem1[0, 3] = 0.06 # glasses
sem1[0, 1] -= 0.06 # skin
force_mean_idx = [3]
if mode == "openeyes":
if sem0[0, 4] != 0 or sem0[0, 5] != 0: # if already has eyes open, sample new image
self.face_manipulation(savefile, bg_to_white=bg_to_white, mode=mode)
return
if sem0[0, 3] != 0: # if has glasses, sample new image
self.face_manipulation(savefile, bg_to_white=bg_to_white, mode=mode)
return
sem1 = sem0.clone()
sem1[0, 4] = 0.01 # l_eye
sem1[0, 5] = 0.01 # r_eye
sem1[0, 1] -= 0.02 # skin
force_mean_idx = [4, 5]
if mode == "unbald":
if sem0[0, 13] != 0: # if already has hair, sample new image
self.face_manipulation(savefile, bg_to_white=bg_to_white, mode=mode)
return
if sem0[0, 14] != 0: # if has hat, hat might cover hair, sample new image
self.face_manipulation(savefile, bg_to_white=bg_to_white, mode=mode)
return
sem1 = sem0.clone()
sem1[0, 13] = 0.2 # hair
sem1[0, 1] -= 0.15 # skin
sem1[0, 0] -= 0.05 # bg
force_mean_idx = [13]
if mode == "bald":
if sem0[0, 13] > 0.15: # too much hair
self.face_manipulation(savefile, bg_to_white=bg_to_white, mode=mode)
return
if sem0[0, 13] < 0.05: # not enough hair
self.face_manipulation(savefile, bg_to_white=bg_to_white, mode=mode)
return
sem1 = sem0.clone()
sem1[0, 13] = 0 # hair
sem1[0, 1] += 3 * sem0[0, 13] / 4 # skin
sem1[0, 0] += sem0[0, 13] / 4 # bg
if mode == "teeth":
if sem0[0, 10] != 0: # if already has teeth, sample new image
self.face_manipulation(savefile, bg_to_white=bg_to_white, mode=mode)
return
sem1 = sem0.clone()
sem1[0, 10] = 0.02 # mouth
sem1[0, 1] -= 0.02 # skin
if self.addition_mode:
sem1[0, 11] += sem0[0, 11] # u_lip
sem1[0, 12] += sem0[0, 12] # l_lip
force_mean_idx = [10]
if mode == "eyebrows":
if sem0[0, 6] < 0.008: # not enough eyebrows
self.face_manipulation(savefile, bg_to_white=bg_to_white, mode=mode)
return
sem1 = sem0.clone()
sem1[0, 6] = 0.001 # l_brow
sem1[0, 7] = 0.001 # r_brow
sem1[0, 1] += sem0[0, 6] + sem0[0, 7] - 0.002 # skin
if mode == "morebrows":
if sem0[0, 6] > 0.001: # too much eyebrows
self.face_manipulation(savefile, bg_to_white=bg_to_white, mode=mode)
return
sem1 = sem0.clone()
sem1[0, 6] = 0.01 # l_brow
sem1[0, 7] = 0.01 # r_brow
sem1[0, 1] -= 0.02 - sem0[0, 6] - sem0[0, 7] # skin
if mode == "newbrows":
sem1 = sem0.clone()
sem1[0, 6] += sem0[0, 6] # l_brow
sem1[0, 7] += sem0[0, 7] # r_brow
sem1[0, 1] += sem0[0, 6] + sem0[0, 7] # skin
if mode == "hair":
if sem0[0, 13] > 0.2: # too much hair
self.face_manipulation(savefile, bg_to_white=bg_to_white, mode=mode)
return
if sem0[0, 13] < 0.15: # not enough hair
self.face_manipulation(savefile, bg_to_white=bg_to_white, mode=mode)
return
sem1 = sem0.clone()
sem1[0, 13] = 0.4 # hair
sem1[0, 1] -= 2/5 * (0.4 - sem0[0, 13]) # skin
sem1[0, 0] -= 3/5 * (0.4 - sem0[0, 13]) # bg
sem_cond = [sem1 * i / (self.vis_steps - 1) + sem0 * (1 - i / (self.vis_steps - 1)) for i in range(self.vis_steps)]
# print("same", sem_cond[0] == sem0)
sem_cond = torch.cat(sem_cond, dim=0)
# print("orig", sem_cond[:, :4] * 10000)
data["sem_cond"] = sem_cond
data["sem_seg"] = data["sem_seg"].repeat(self.vis_steps, 1, 1, 1)
data["z_seg"] = torch.randn(1, self.seg_opt.latent_dim).repeat(self.vis_steps, 1)
fake_data = []
batch_num = self.vis_steps // self.batch_size
style = self.get_style()
# save style
if self.load_style:
print("setting status to save_style")
self.seg_img_model.img_model.netG.set_status("save_style")
init_data = {k: v[[0]] if v.size(0) > 0 else v for k, v in data.items()}
init_data["obj_dic"] = deepcopy(style)
self.seg_img_model(data=init_data, mode='inference', log=True, as_list=False)
obj_dic = init_data["obj_dic"]
for i in force_mean_idx:
obj_dic[str(i)]["ACE"] = style[str(i)]["ACE"]
else:
obj_dic = style
# np.save("datasets/white.npy", obj_dic["0"]["ACE"].cpu().numpy())
# obj_dic["0"]["ACE"] = torch.tensor(np.load("datasets/white.npy")).cuda() # set bg to white
#print(ok)
# use style
print("setting status to use_style")
self.seg_img_model.img_model.netG.set_status("use_style")
for i in range(batch_num):
start = i * self.batch_size
end = min(start + self.batch_size, self.vis_steps)
batch_data = {k: v[start:end] if v.size(0) > 0 else v for k, v in data.items()}
batch_data["obj_dic"] = obj_dic
fake_data.append(self.seg_img_model(data=batch_data, mode='inference', log=True, as_list=False))
fake_data = {k: torch.cat([fake[k] for fake in fake_data], dim=0) for k in fake_data[0].keys()}
self.plot_face_interpolation(data, fake_data, savefile)
def process_raw_sem(self, data):
raw_sem = data["raw_sem_seg"].detach()
raw = torch.argmax(raw_sem, dim=1, keepdim=True).cpu()
raw[raw == self.opt.num_semantics] = -1
return raw
def plot_face_interpolation(self, data, fake_data, savefile):
sem_seg = fake_data["sem_seg"]
raw = self.process_raw_sem(fake_data)
pred = torch.argmax(sem_seg, dim=1, keepdim=True).cpu()
target = torch.argmax(data["sem_seg"], dim=1, keepdim=True).cpu()
sem_img = (color_transfer(pred, self.colormap) + 1) / 2
raw_img = (color_transfer(raw, self.colormap) + 1) / 2
true_sem_img = (color_transfer(target, self.colormap) + 1) / 2
sem_cond = data["sem_cond"].cpu()
if self.addition_mode:
sem_cond -= sem_cond[0].clone()
sem_cond[sem_cond < 0] = 0
fake_sem_cond = torch.mean(fake_data["raw_sem_seg"][:, :-1], dim=(2, 3)).cpu()
else:
fake_sem_cond = torch.mean(sem_seg, dim=(2, 3)).cpu()
fake_img = (fake_data["img"].cpu() + 1) / 2
img = (data["img"][0].cpu() + 1) / 2
if self.opt.save_full_res:
self.save_full_res(savefile, sem_cond, fake_sem_cond, [[true_sem_img[0]],
[img],
sem_img,
fake_img,
raw_img])
def plot(i, get_fig=False):
fig, ((ax1, ax2), (ax3, ax4), (ax5, ax6)) = plt.subplots(3, 2, figsize=(10, 15))
ax1.imshow(true_sem_img[0].numpy().transpose(1, 2, 0).squeeze())
ax1.set_axis_off()
ax2.imshow(img.numpy().transpose(1, 2, 0).squeeze())
ax2.set_axis_off()
ax3.imshow(sem_img[i].numpy().transpose(1, 2, 0))
ax3.set_axis_off()
ax4.imshow(fake_img[i].numpy().transpose(1, 2, 0).squeeze())
ax4.set_axis_off()
if not self.seg_opt.fill_crop_only and not self.seg_opt.merged_activation:
ax5.imshow(raw_img[i].numpy().transpose(1, 2, 0).squeeze())
ax5.set_axis_off()
x = np.array(range(self.opt.num_semantics))
width = 0.5
ax6.bar(x - width / 2, sem_cond[i], width, label='desired')
ax6.bar(x + width / 2, fake_sem_cond[i], width, label='generated')
ax6.xaxis.set_ticks(x)
ax6.set_xticklabels(self.opt.semantic_labels, rotation='vertical')
minor_ticks = np.array(range(-1, self.opt.num_semantics)) + 0.5
ax6.set_xticks(minor_ticks, minor=True)
ax6.xaxis.grid(which='major', alpha=1, c="white", lw=1)
ax6.xaxis.grid(which='minor', alpha=1, c="grey", lw=1)
ax6.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax6.set_ylim(0, 0.1 + max(torch.max(sem_cond), torch.max(fake_sem_cond)))
asp = np.diff(ax6.get_xlim())[0] / np.diff(ax6.get_ylim())[0]
asp /= np.abs(np.diff(ax1.get_xlim())[0] / np.diff(ax1.get_ylim())[0])
ax6.set_aspect(asp)
fig.tight_layout()
if get_fig:
return fig
def get_image(i):
fig = plot(i, get_fig=True)
fig.canvas.draw() # draw the canvas, cache the renderer
image = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')
image = image.reshape(fig.canvas.get_width_height()[::-1] + (3,))
plt.close()
return image
imageio.mimsave(f'{self.opt.log_path}/{savefile}', [get_image(i) for i in tqdm(range(self.vis_steps), desc=savefile)], fps=8)
def plot_ins_interpolation(self, fake_data, ins_cond, sem_cond, savefile):
sem_seg = fake_data["sem_seg"]
pred = torch.argmax(sem_seg, dim=1, keepdim=True).cpu()
sem_img = (color_transfer(pred, self.colormap) + 1) / 2
ins_cond = ins_cond.cpu()
sem_cond = sem_cond.cpu()
fake_sem_cond = torch.mean(sem_seg, dim=(2, 3)).cpu()
fake_ins_cond = torch.sum(fake_data["ins_density"], dim=(2, 3)).cpu()
img = (fake_data["img"].cpu() + 1) / 2
if fake_data["ins_edge"].size(0) > 0:
ins_img = fake_data["ins_edge"].cpu()
elif fake_data["ins_center"].size(0) > 0:
ins_offset = fake_data["ins_offset"].cpu()
index = sem_seg.max(dim=1, keepdim=True)[1]
seg_mc = torch.zeros_like(sem_seg).scatter_(1, index, 1.0)
bg = (seg_mc[:, self.opt.things_idx].sum(dim=1) == 0)
angle = (1 + torch.atan2(ins_offset[:, 1], ins_offset[:, 0]) / np.pi) / 2
sat_norm = torch.min(10 * (torch.sqrt(ins_offset[:, 0] ** 2 + ins_offset[:, 1] ** 2)), torch.tensor([1.]))
cmp = cm.get_cmap('hsv', 128)
offset_rgba = cmp(angle.numpy())
offset_rgb = torch.tensor(offset_rgba[:, :, :, :3]).float()
offset_rgb = sat_norm.unsqueeze(-1) * offset_rgb + (1 - sat_norm).unsqueeze(-1) * torch.ones_like(offset_rgb)
offset_rgb[bg] = torch.tensor([0., 0., 0.])
offset_rgb = offset_rgb.permute(0, 3, 1, 2)
center_mask = self.ins_refiner.get_peak_mask(fake_data["ins_center"].cpu()).float()
ins_img = offset_rgb - 2 * center_mask
ins_img[ins_img < 0] = 0.5
if self.opt.save_full_res:
self.save_full_res(savefile, sem_cond, fake_sem_cond, [img, sem_img, ins_img],
fake_ins=fake_ins_cond, real_ins=ins_cond)
def plot(i, get_fig=False):
fig, ((ax1, ax2), (ax3, ax4), (ax5, ax6)) = plt.subplots(3, 2, figsize=(15, 10))
ax1.imshow(sem_img[i].numpy().transpose(1, 2, 0))
ax1.set_axis_off()
x = np.array(range(self.opt.num_semantics))
width = 0.5
ax2.bar(x - width / 2, sem_cond[i], width, label='desired')
ax2.bar(x + width / 2, fake_sem_cond[i], width, label='generated')
ax2.xaxis.set_ticks(x)
ax2.set_xticklabels(self.opt.semantic_labels, rotation='vertical')
minor_ticks = np.array(range(-1, self.opt.num_semantics)) + 0.5
ax2.set_xticks(minor_ticks, minor=True)
ax2.xaxis.grid(which='major', alpha=1, c="white", lw=1)
ax2.xaxis.grid(which='minor', alpha=1, c="grey", lw=1)
ax2.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax2.set_ylim(0, 0.1 + max(torch.max(sem_cond), torch.max(fake_sem_cond)))
asp = np.diff(ax2.get_xlim())[0] / np.diff(ax2.get_ylim())[0]
asp /= np.abs(np.diff(ax1.get_xlim())[0] / np.diff(ax1.get_ylim())[0])
ax2.set_aspect(asp)
ax3.imshow(ins_img[i].numpy().transpose(1, 2, 0).squeeze())
ax3.set_axis_off()
x = np.array(range(self.opt.num_things))
width = 0.5
ax4.bar(x - width / 2, ins_cond[i], width, label='desired')
ax4.bar(x + width / 2, fake_ins_cond[i], width, label='predicted')
ax4.xaxis.set_ticks(x)
ax4.set_xticklabels([self.opt.semantic_labels[k] for k in self.opt.things_idx], rotation='vertical')
minor_ticks = np.array(range(-1, self.opt.num_semantics)) + 0.5
ax4.set_xticks(minor_ticks, minor=True)
ax4.xaxis.grid(which='major', alpha=1, c="white", lw=1)
ax4.xaxis.grid(which='minor', alpha=1, c="grey", lw=1)
ax4.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax4.set_ylim(0, 0.1 + max(torch.max(ins_cond), torch.max(fake_ins_cond)))
asp = np.diff(ax4.get_xlim())[0] / np.diff(ax4.get_ylim())[0]
asp /= np.abs(np.diff(ax1.get_xlim())[0] / np.diff(ax1.get_ylim())[0])
ax4.set_aspect(asp)
ax5.imshow(img[i].numpy().transpose(1, 2, 0).squeeze())
ax5.set_axis_off()
ax6.set_axis_off()
fig.tight_layout()
if get_fig:
return fig
def get_image(i):
fig = plot(i, get_fig=True)
fig.canvas.draw() # draw the canvas, cache the renderer
image = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')
image = image.reshape(fig.canvas.get_width_height()[::-1] + (3,))
plt.close()
return image
imageio.mimsave(f'{self.opt.log_path}/{savefile}', [get_image(i) for i in tqdm(range(self.batch_size), desc=savefile)], fps=8)
def plot_sem_interpolation(self, fake_data, sem_cond, savefile):
sem_seg = fake_data["sem_seg"]
pred = torch.argmax(sem_seg, dim=1, keepdim=True).cpu()
sem_img = (color_transfer(pred, self.colormap) + 1) / 2
sem_cond = sem_cond.cpu()
fake_sem_cond = torch.mean(sem_seg, dim=(2, 3)).cpu()
img = (fake_data["img"].cpu() + 1) / 2
sem_mask = fake_data["sem_mask"].cpu()
spread = torch.sum(sem_mask, dim=1)
spread_img = color_spread(spread, max_spread=5)
raw_sem_seg = (sem_mask + 0.000001) / torch.sum(sem_mask + 0.000001, dim=1, keepdim=True)
logprob = torch.log(raw_sem_seg + 0.00001)
entropy_img = -torch.sum(torch.mul(raw_sem_seg, logprob), dim=1, keepdim=True)
if self.opt.save_full_res:
self.save_full_res(savefile, sem_cond, fake_sem_cond, [img,
sem_img,
spread_img,
entropy_img])
def plot(i, get_fig=False):
fig = plt.figure(figsize=(15, 10))
gs = fig.add_gridspec(3, 2)
ax1 = fig.add_subplot(gs[0, :])
ax3 = fig.add_subplot(gs[1, 0])
ax4 = fig.add_subplot(gs[1, 1])
ax5 = fig.add_subplot(gs[2, 0])
ax6 = fig.add_subplot(gs[2, 1])
x = np.array(range(self.opt.num_semantics))
width = 0.5
ax1.bar(x - width / 2, sem_cond[i], width, label='desired')
ax1.bar(x + width / 2, fake_sem_cond[i], width, label='generated')
ax1.xaxis.set_ticks(x)
ax1.set_xticklabels(self.opt.semantic_labels, rotation='vertical')
minor_ticks = np.array(range(-1, self.opt.num_semantics)) + 0.5
ax1.set_xticks(minor_ticks, minor=True)
ax1.xaxis.grid(which='major', alpha=1, c="white", lw=1)
ax1.xaxis.grid(which='minor', alpha=1, c="grey", lw=1)
ax1.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax1.set_ylim(0, 0.1 + max(torch.max(sem_cond), torch.max(fake_sem_cond)))
# asp = np.diff(ax2.get_xlim())[0] / np.diff(ax2.get_ylim())[0]
# asp /= np.abs(np.diff(ax1.get_xlim())[0] / np.diff(ax1.get_ylim())[0])
# ax2.set_aspect(asp)
ax3.imshow(sem_img[i].numpy().transpose(1, 2, 0))
ax3.set_axis_off()
ax4.imshow(img[i].numpy().transpose(1, 2, 0).squeeze())
ax4.set_axis_off()
ax5.imshow(spread_img[i].numpy().transpose(1, 2, 0).squeeze())
ax5.set_axis_off()
ax6.imshow(entropy_img[i].numpy().transpose(1, 2, 0).squeeze(), cmap='Greys_r')
ax6.set_axis_off()
fig.tight_layout()
if get_fig:
return fig
def get_image(i):
fig = plot(i, get_fig=True)
fig.canvas.draw() # draw the canvas, cache the renderer
image = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')
image = image.reshape(fig.canvas.get_width_height()[::-1] + (3,))
plt.close()
return image
imageio.mimsave(f'{self.opt.log_path}/{savefile}', [get_image(i) for i in tqdm(range(self.vis_steps), desc=savefile)], fps=8)
def next_batch(self):
try:
return next(self.loader_iter)
except StopIteration:
self.loader_iter = iter(self.dataloader)
return next(self.loader_iter)
def run(self):
with Engine(self.opt) as engine:
self.engine = engine
self.dataset = create_dataset(self.opt, load_seg=True, load_img=True)
self.dataloader, self.datasampler = engine.create_dataloader(self.dataset, self.opt.vis_dataloader_bs, self.opt.num_workers, True)
self.loader_iter = iter(self.dataloader)
is_main = self.opt.local_rank == 0
logger = Logger(self.opt) if is_main else None
self.seg_img_model_on_one_gpu = SegImgModel(self.seg_opt, self.img_opt, is_train=False, is_main=is_main, logger=logger)
self.seg_img_model = engine.data_parallel(self.seg_img_model_on_one_gpu)
self.seg_img_model.eval()
self.colormap = create_colormap(self.opt)
self.load_style = not self.opt.mean_style_only
self.addition_mode = self.opt.addition_mode
if self.opt.vis_random_style:
self.style_codes = []
for i in range(self.opt.num_semantics):
self.style_codes.append(glob(os.path.join(self.opt.extraction_path, 'style_codes/*', str(i), 'ACE.npy')))
for i in range(self.opt.niter):
if 'fixedsem' in self.opt.vis_method:
self.fixedsem_manipulation(f"fixedsem_manipulation_{i}.gif")
if 'scrop' in self.opt.vis_method:
self.scrop_manipulation(f"scrop_manipulation_{i}.gif")
if 'latent' in self.opt.vis_method:
self.latent_interpolation(f"latent_interpolation_{i}.gif")
if 'cond' in self.opt.vis_method:
self.cond_interpolation(f"cond_interpolation_{i}.gif")
if 'car' in self.opt.vis_method:
self.ins_manipulation(26, 0.25, 0.25, 0.5, 20, f"car_manipulation_{i}.gif")
if 'person' in self.opt.vis_method:
self.ins_manipulation(24, 0.25, 0.25, 1, 20, f"person_manipulation_{i}.gif")
if 'face' in self.opt.vis_method:
self.face_manipulation(f"face_manipulation_{i}.gif")
if 'earrings' in self.opt.vis_method:
self.face_manipulation(f"earrings_manipulation_{i}.gif", mode="earrings")
if 'hair' in self.opt.vis_method:
self.face_manipulation(f"hair_manipulation_{i}.gif", mode="hair")
if 'rebald' in self.opt.vis_method:
self.face_manipulation(f"bald_manipulation_{i}.gif", mode="bald")
if 'unbald' in self.opt.vis_method:
self.face_manipulation(f"unbald_manipulation_{i}.gif", mode="unbald")
if 'hat' in self.opt.vis_method:
self.face_manipulation(f"hat_manipulation_{i}.gif", mode="hat")
if 'eyebrows' in self.opt.vis_method:
self.face_manipulation(f"eyebrows_manipulation_{i}.gif", mode="eyebrows")
if 'teeth' in self.opt.vis_method:
self.face_manipulation(f"teeth_manipulation_{i}.gif", mode="teeth")
if 'glasses' in self.opt.vis_method:
self.face_manipulation(f"glasses_manipulation_{i}.gif", mode="glasses")
if 'openeyes' in self.opt.vis_method:
self.face_manipulation(f"eyes_manipulation_{i}.gif", mode="openeyes")
if 'newbrows' in self.opt.vis_method:
self.face_manipulation(f"newbrows_manipulation_{i}.gif", mode="newbrows")
if 'morebrows' in self.opt.vis_method:
self.face_manipulation(f"morebrows_manipulation_{i}.gif", mode="morebrows")
if 'nose' in self.opt.vis_method:
self.face_manipulation(f"nose_manipulation_{i}.gif", mode="nose")
if 'skin' in self.opt.vis_method:
self.face_manipulation(f"skin_manipulation_{i}.gif", mode="skin")
print('Visualization was successfully finished.')
if __name__ == "__main__":
opt = Options().parse(load_seg_generator=True, load_img_generator=True, save=True)
Visualizer(opt).run()
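# --- Added sketch (illustrative, not part of the original script) ---------------------
# A hedged illustration of what a proportion target such as `sem_cond` means: averaging
# a one-hot segmentation over its spatial dimensions yields the per-class pixel
# fractions that the bar plots above compare against the generated layouts.
# The tensor shapes below are made up for illustration.
def _sem_cond_sketch():
    seg = torch.zeros(1, 3, 4, 4)           # batch of 1, 3 classes, 4x4 map
    seg[0, 0, :2] = 1.0                     # top half labelled class 0
    seg[0, 1, 2:] = 1.0                     # bottom half labelled class 1
    return torch.mean(seg, dim=(2, 3))      # tensor([[0.5, 0.5, 0.0]])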
| [
"torch.cat",
"torch.stack",
"torch.sum",
"torch.mul",
"torch.sqrt",
"torch.tensor",
"torch.zeros_like",
"torch.max",
"torch.mean",
"torch.log",
"torch.argmax",
"torch.ones_like",
"torch.atan2",
"torch.randn"
] | 1.0.0 | valeoai/SemanticPalette | a1b02a384c09881d6f1ca1a0c0ebfd87278c3d7d |
1.10 | # ------------------------------------------------------------------------------
# Libraries
# ------------------------------------------------------------------------------
import datetime
import json
import logging
import math
import os
from time import time
import torch
from utils.visualization import WriterTensorboardX
# ------------------------------------------------------------------------------
# Class of BaseTrainer
# ------------------------------------------------------------------------------
class BaseTrainer:
"""
Base class for all trainers
"""
def __init__(
self, model, loss, metrics, optimizer, resume, config, train_logger=None
):
self.config = config
# Setup directory for checkpoint saving
start_time = datetime.datetime.now().strftime("%m%d_%H%M%S")
self.checkpoint_dir = os.path.join(
config["trainer"]["save_dir"], config["name"], start_time
)
os.makedirs(self.checkpoint_dir, exist_ok=True)
# Setup logger
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(message)s",
handlers=[
logging.FileHandler(os.path.join(self.checkpoint_dir, "train.log")),
logging.StreamHandler(),
],
)
self.logger = logging.getLogger(self.__class__.__name__)
# Setup GPU device if available, move model into configured device
self.device, device_ids = self._prepare_device(config["n_gpu"])
self.model = model.to(self.device)
if len(device_ids) > 1:
self.model = torch.nn.DataParallel(model, device_ids=device_ids)
self.loss = loss
self.metrics = metrics
self.optimizer = optimizer
self.epochs = config["trainer"]["epochs"]
self.save_freq = config["trainer"]["save_freq"]
self.verbosity = config["trainer"]["verbosity"]
self.train_logger = train_logger
# configuration to monitor model performance and save best
self.monitor = config["trainer"]["monitor"]
self.monitor_mode = config["trainer"]["monitor_mode"]
assert self.monitor_mode in ["min", "max", "off"]
self.monitor_best = math.inf if self.monitor_mode == "min" else -math.inf
self.start_epoch = 1
# setup visualization writer instance
writer_train_dir = os.path.join(
config["visualization"]["log_dir"], config["name"], start_time, "train"
)
writer_valid_dir = os.path.join(
config["visualization"]["log_dir"], config["name"], start_time, "valid"
)
self.writer_train = WriterTensorboardX(
writer_train_dir, self.logger, config["visualization"]["tensorboardX"]
)
self.writer_valid = WriterTensorboardX(
writer_valid_dir, self.logger, config["visualization"]["tensorboardX"]
)
# Save configuration file into checkpoint directory
config_save_path = os.path.join(self.checkpoint_dir, "config.json")
with open(config_save_path, "w") as handle:
json.dump(config, handle, indent=4, sort_keys=False)
# Resume
if resume:
self._resume_checkpoint(resume)
def _prepare_device(self, n_gpu_use):
"""
setup GPU device if available, move model into configured device
"""
n_gpu = torch.cuda.device_count()
if n_gpu_use > 0 and n_gpu == 0:
self.logger.warning(
"Warning: There's no GPU available on this machine, training will be performed on CPU."
)
n_gpu_use = 0
if n_gpu_use > n_gpu:
msg = "Warning: The number of GPU's configured to use is {}, but only {} are available on this machine.".format(
n_gpu_use, n_gpu
)
self.logger.warning(msg)
n_gpu_use = n_gpu
device = torch.device("cuda:0" if n_gpu_use > 0 else "cpu")
list_ids = list(range(n_gpu_use))
return device, list_ids
def train(self):
for epoch in range(self.start_epoch, self.epochs + 1):
self.logger.info(
"\n----------------------------------------------------------------"
)
self.logger.info("[EPOCH %d]" % (epoch))
start_time = time()
result = self._train_epoch(epoch)
finish_time = time()
self.logger.info(
"Finish at {}, Runtime: {:.3f} [s]".format(
datetime.datetime.now(), finish_time - start_time
)
)
# save logged informations into log dict
log = {}
for key, value in result.items():
if key == "train_metrics":
log.update(
{
"train_" + mtr.__name__: value[i]
for i, mtr in enumerate(self.metrics)
}
)
elif key == "valid_metrics":
log.update(
{
"valid_" + mtr.__name__: value[i]
for i, mtr in enumerate(self.metrics)
}
)
else:
log[key] = value
# print logged informations to the screen
if self.train_logger is not None:
self.train_logger.add_entry(log)
if self.verbosity >= 1:
for key, value in sorted(list(log.items())):
self.logger.info("{:25s}: {}".format(str(key), value))
# evaluate model performance according to configured metric, save best checkpoint as model_best
best = False
if self.monitor_mode != "off":
try:
if (
self.monitor_mode == "min"
and log[self.monitor] < self.monitor_best
) or (
self.monitor_mode == "max"
and log[self.monitor] > self.monitor_best
):
self.logger.info(
"Monitor improved from %f to %f"
% (self.monitor_best, log[self.monitor])
)
self.monitor_best = log[self.monitor]
best = True
except KeyError:
if epoch == 1:
msg = (
"Warning: Can't recognize metric named '{}' ".format(
self.monitor
)
+ "for performance monitoring. model_best checkpoint won't be updated."
)
self.logger.warning(msg)
# Save checkpoint
self._save_checkpoint(epoch, save_best=best)
def _train_epoch(self, epoch):
"""
Training logic for an epoch
:param epoch: Current epoch number
"""
raise NotImplementedError
def _save_checkpoint(self, epoch, save_best=False):
"""
Saving checkpoints
:param epoch: current epoch number
:param log: logging information of the epoch
:param save_best: if True, rename the saved checkpoint to 'model_best.pth'
"""
# Construct savedict
arch = type(self.model).__name__
state = {
"arch": arch,
"epoch": epoch,
"logger": self.train_logger,
"state_dict": self.model.state_dict(),
"optimizer": self.optimizer.state_dict(),
"monitor_best": self.monitor_best,
"config": self.config,
}
# Save checkpoint for each epoch
if (
self.save_freq is not None
        ):  # Use None to avoid filling disk space when models are large
if epoch % self.save_freq == 0:
filename = os.path.join(
self.checkpoint_dir, "epoch{}.pth".format(epoch)
)
torch.save(state, filename)
self.logger.info("Saving checkpoint at {}".format(filename))
# Save the best checkpoint
if save_best:
best_path = os.path.join(self.checkpoint_dir, "model_best.pth")
torch.save(state, best_path)
self.logger.info("Saving current best at {}".format(best_path))
else:
self.logger.info("Monitor is not improved from %f" % (self.monitor_best))
def _resume_checkpoint(self, resume_path):
"""
Resume from saved checkpoints
:param resume_path: Checkpoint path to be resumed
"""
self.logger.info("Loading checkpoint: {}".format(resume_path))
checkpoint = torch.load(resume_path)
self.start_epoch = checkpoint["epoch"] + 1
self.monitor_best = checkpoint["monitor_best"]
# load architecture params from checkpoint.
if checkpoint["config"]["arch"] != self.config["arch"]:
self.logger.warning(
"Warning: Architecture configuration given in config file is different from that of checkpoint. "
+ "This may yield an exception while state_dict is being loaded."
)
self.model.load_state_dict(checkpoint["state_dict"], strict=True)
# # load optimizer state from checkpoint only when optimizer type is not changed.
# if checkpoint['config']['optimizer']['type'] != self.config['optimizer']['type']:
# self.logger.warning('Warning: Optimizer type given in config file is different from that of checkpoint. ' + \
# 'Optimizer parameters not being resumed.')
# else:
# self.optimizer.load_state_dict(checkpoint['optimizer'])
self.train_logger = checkpoint["logger"]
self.logger.info(
"Checkpoint '{}' (epoch {}) loaded".format(
resume_path, self.start_epoch - 1
)
)
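# ------------------------------------------------------------------------------
# Added sketch (illustrative, not part of this file): a minimal subclass showing
# the dict shape `_train_epoch` is expected to return so that the key-merging in
# `train` (the "train_metrics" / "valid_metrics" branches) and the monitored key
# (e.g. config["trainer"]["monitor"] = "valid_loss") both resolve.
# ------------------------------------------------------------------------------
class _SketchTrainer(BaseTrainer):
    def _train_epoch(self, epoch):
        # ... run one epoch of training and validation here ...
        return {
            "loss": 0.42,                                  # logged under its own key
            "train_metrics": [0.0 for _ in self.metrics],  # expanded to "train_<metric_name>"
            "valid_loss": 0.37,                            # a typical monitored key
            "valid_metrics": [0.0 for _ in self.metrics],  # expanded to "valid_<metric_name>"
        }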
| [
"torch.device",
"torch.save",
"torch.cuda.device_count",
"torch.load",
"torch.nn.DataParallel"
] | 1.10.1 | crutcher/stylelens | 8df3704f56fe6a30395eadcb1aee2e11563dfabb |
1.4 | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import torch
from torch.utils.data import DataLoader
import evaluate
from accelerate import Accelerator, DistributedType
from datasets import load_dataset
from transformers import (
AdamW,
AutoModelForSequenceClassification,
AutoTokenizer,
get_linear_schedule_with_warmup,
set_seed,
)
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
"""
Creates a set of `DataLoader`s for the `glue` dataset,
using "bert-base-cased" as the tokenizer.
Args:
accelerator (`Accelerator`):
An `Accelerator` object
batch_size (`int`, *optional*):
The batch size for the train and validation DataLoaders.
"""
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
datasets = load_dataset("glue", "mrpc")
def tokenize_function(examples):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
remove_columns=["idx", "sentence1", "sentence2"],
)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
return tokenizer.pad(examples, padding="longest", return_tensors="pt")
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
)
eval_dataloader = DataLoader(
tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
)
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
get_dataloaders = mocked_dataloaders # noqa: F811
def training_function(config, args):
# Initialize accelerator
accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config["lr"]
num_epochs = int(config["num_epochs"])
correct_bias = config["correct_bias"]
seed = int(config["seed"])
batch_size = int(config["batch_size"])
metric = evaluate.load("glue", "mrpc")
# If the batch size is too big we use gradient accumulation
gradient_accumulation_steps = 1
if batch_size > MAX_GPU_BATCH_SIZE:
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
batch_size = MAX_GPU_BATCH_SIZE
set_seed(seed)
train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
model = model.to(accelerator.device)
# Instantiate optimizer
optimizer = AdamW(params=model.parameters(), lr=lr, correct_bias=correct_bias)
# Instantiate scheduler
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=100,
num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
# Now we train the model
for epoch in range(num_epochs):
model.train()
for step, batch in enumerate(train_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
outputs = model(**batch)
loss = outputs.loss
loss = loss / gradient_accumulation_steps
accelerator.backward(loss)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
samples_seen = 0
for step, batch in enumerate(eval_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
predictions, references = accelerator.gather((predictions, batch["labels"]))
# New Code #
# First we check if it's a distributed system
if accelerator.num_processes > 1:
# Then see if we're on the last batch of our eval dataloader
if step == len(eval_dataloader) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
references = references[: len(eval_dataloader.dataset) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
metric.add_batch(
predictions=predictions,
references=references,
)
eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
parser = argparse.ArgumentParser(description="Simple example of training script.")
parser.add_argument(
"--mixed_precision",
type=str,
default="no",
choices=["no", "fp16", "bf16"],
help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU.",
)
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
args = parser.parse_args()
config = {"lr": 2e-5, "num_epochs": 3, "correct_bias": True, "seed": 42, "batch_size": 16}
training_function(config, args)
if __name__ == "__main__":
main()
| [
"torch.no_grad",
"torch.utils.data.DataLoader"
] | 1.4.0 | techthiyanes/accelerate | 3d92caa24169b9606ba2a7dbf7b86dc0e009fba0 |
1.4 | # Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import json
import os
import re
import shutil
import tempfile
from collections import defaultdict
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
import torch
import torch.nn as nn
from .offload import offload_weight, save_offload_index
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
def convert_file_size_to_int(size: Union[int, str]):
"""
Converts a size expressed as a string with digits and a unit (like `"5MB"`) to an integer (in bytes).
Args:
size (`int` or `str`): The size to convert. Will be directly returned if an `int`.
Example:
```py
>>> convert_file_size_to_int("1MiB")
1048576
```
"""
if isinstance(size, int):
return size
if size.upper().endswith("GIB"):
return int(size[:-3]) * (2**30)
if size.upper().endswith("MIB"):
return int(size[:-3]) * (2**20)
if size.upper().endswith("KIB"):
return int(size[:-3]) * (2**10)
if size.upper().endswith("GB"):
int_size = int(size[:-2]) * (10**9)
return int_size // 8 if size.endswith("b") else int_size
if size.upper().endswith("MB"):
int_size = int(size[:-2]) * (10**6)
return int_size // 8 if size.endswith("b") else int_size
if size.upper().endswith("KB"):
int_size = int(size[:-2]) * (10**3)
return int_size // 8 if size.endswith("b") else int_size
raise ValueError("`size` is not in a valid format. Use an integer followed by the unit, e.g., '5GB'.")
def dtype_byte_size(dtype: torch.dtype):
"""
Returns the size (in bytes) occupied by one parameter of type `dtype`.
Example:
```py
>>> dtype_byte_size(torch.float32)
4
```
"""
if dtype == torch.bool:
return 1 / 8
bit_search = re.search(r"[^\d](\d+)$", str(dtype))
if bit_search is None:
raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
bit_size = int(bit_search.groups()[0])
return bit_size // 8
def set_module_tensor_to_device(
module: nn.Module, tensor_name: str, device: Union[int, str, torch.device], value: Optional[torch.Tensor] = None
):
"""
A helper function to set a given tensor (parameter or buffer) of a module on a specific device (note that doing
`param.to(device)` creates a new tensor not linked to the parameter, which is why we need this function).
Args:
module (`torch.nn.Module`): The module in which the tensor we want to move lives.
tensor_name (`str`): The full name of the parameter/buffer.
device (`int`, `str` or `torch.device`): The device on which to set the tensor.
value (`torch.Tensor`, *optional*): The value of the tensor (useful when going from the meta device to any
other device).
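Example (a minimal illustrative sketch; the small layer below is hypothetical):
```py
>>> layer = nn.Linear(4, 4)
>>> set_module_tensor_to_device(layer, "weight", "cpu", value=torch.randn(4, 4))
```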
"""
# Recurse if needed
if "." in tensor_name:
splits = tensor_name.split(".")
for split in splits[:-1]:
new_module = getattr(module, split)
if new_module is None:
raise ValueError(f"{module} has no attribute {split}.")
module = new_module
tensor_name = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
is_buffer = tensor_name in module._buffers
old_value = getattr(module, tensor_name)
if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")
with torch.no_grad():
if value is None:
new_value = old_value.to(device)
elif isinstance(value, torch.Tensor):
new_value = value.to(device)
else:
new_value = torch.tensor(value, device=device)
if is_buffer:
module._buffers[tensor_name] = new_value
else:
new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
module._parameters[tensor_name] = new_value
def named_module_tensors(module: nn.Module, include_buffers: bool = True, recurse: bool = False):
"""
A helper function that gathers all the tensors (parameters + buffers) of a given module. If `include_buffers=True`
it's the same as doing `module.named_parameters(recurse=recurse) + module.named_buffers(recurse=recurse)`.
Args:
module (`torch.nn.Module`): The module we want the tensors of.
include_buffers (`bool`, *optional*, defaults to `True`): Whether or not to include the buffers in the result.
recurse (`bool`, *optional*, defaults to `False`):
Whether or not to go look in every submodule or just return the direct parameters and buffers.
"""
for named_parameter in module.named_parameters(recurse=recurse):
yield named_parameter
if include_buffers:
for named_buffer in module.named_buffers(recurse=recurse):
yield named_buffer
def find_tied_parameters(model: nn.Module, **kwargs):
"""
Find the tied parameters in a given model.
Args:
model (`torch.nn.Module`): The model to inspect.
<Tip warning={true}>
The signature accepts keyword arguments, but they are for the recursive part of this function and you should ignore
them.
</Tip>
Example:
```py
>>> from collections import OrderedDict
>>> import torch.nn as nn
>>> model = nn.Sequential(OrderedDict([("linear1", nn.Linear(4, 4)), ("linear2", nn.Linear(4, 4))]))
>>> model.linear2.weight = model.linear1.weight
>>> find_tied_parameters(model)
{'linear1.weight': 'linear2.weight'}
```
Returns:
Dict[str, str]: A dictionary mapping tied parameter names to the name of the parameter they are tied to.
"""
# Initialize result and named_parameters before recursing.
named_parameters = kwargs.get("named_parameters", None)
prefix = kwargs.get("prefix", "")
result = kwargs.get("result", {})
if named_parameters is None:
named_parameters = {n: p for n, p in model.named_parameters()}
else:
# A tied parameter will not be in the full `named_parameters` seen above but will be in the `named_parameters`
# of the submodule it belongs to. So while recursing we track the names that are not in the initial
# `named_parameters`.
for name, parameter in model.named_parameters():
full_name = name if prefix == "" else f"{prefix}.{name}"
if full_name not in named_parameters:
# When we find one, it has to be one of the existing parameters.
for new_name, new_param in named_parameters.items():
if new_param is parameter:
result[new_name] = full_name
# Once we have treated direct parameters, we move to the child modules.
for name, child in model.named_children():
child_name = name if prefix == "" else f"{prefix}.{name}"
find_tied_parameters(child, named_parameters=named_parameters, prefix=child_name, result=result)
return result
def compute_module_sizes(model: nn.Module, dtype: Optional[Union[str, torch.dtype]] = None):
"""
Compute the size of each submodule of a given model.
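Example (illustrative; assumes default float32 parameters, so each of the 20 parameters of the first Linear weighs 4 bytes):
```py
>>> model = nn.Sequential(nn.Linear(4, 4), nn.Linear(4, 2))
>>> compute_module_sizes(model)["0"]
80
```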
"""
if isinstance(dtype, str):
# We accept "torch.float16" or just "float16"
dtype = dtype.replace("torch.", "")
dtype = getattr(torch, dtype)
if dtype is not None:
dtype_size = dtype_byte_size(dtype)
module_sizes = defaultdict(int)
for name, tensor in named_module_tensors(model, recurse=True):
if dtype is None:
size = tensor.numel() * dtype_byte_size(tensor.dtype)
else:
size = tensor.numel() * min(dtype_size, dtype_byte_size(tensor.dtype))
name_parts = name.split(".")
for idx in range(len(name_parts) + 1):
module_sizes[".".join(name_parts[:idx])] += size
return module_sizes
def get_max_layer_size(
modules: List[Tuple[str, torch.nn.Module]], module_sizes: Dict[str, int], no_split_module_classes: List[str]
):
"""
Utility function that will scan a list of named modules and return the maximum size used by one full layer. The
definition of a layer being:
- a module with no direct children (just parameters and buffers)
- a module whose class name is in the list `no_split_module_classes`
Args:
modules (`List[Tuple[str, torch.nn.Module]]`):
The list of named modules where we want to determine the maximum layer size.
module_sizes (`Dict[str, int]`):
A dictionary mapping each layer name to its size (as generated by `compute_module_sizes`).
no_split_module_classes (`List[str]`):
A list of class names for layers we don't want to be split.
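Example (illustrative; reuses the small model from `compute_module_sizes` above):
```py
>>> model = nn.Sequential(nn.Linear(4, 4), nn.Linear(4, 2))
>>> sizes = compute_module_sizes(model)
>>> get_max_layer_size(list(model.named_children()), sizes, [])
(80, ['0'])
```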
Returns:
`Tuple[int, List[str]]`: The maximum size of a layer with the list of layer names realizing that maximum size.
"""
max_size = 0
layer_names = []
modules_to_treat = modules.copy()
while len(modules_to_treat) > 0:
module_name, module = modules_to_treat.pop(0)
modules_children = list(module.named_children())
if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes:
# No splitting this one so we compare to the max_size
size = module_sizes[module_name]
if size > max_size:
max_size = size
layer_names = [module_name]
elif size == max_size:
layer_names.append(module_name)
else:
modules_to_treat = [(f"{module_name}.{n}", v) for n, v in modules_children] + modules_to_treat
return max_size, layer_names
def get_max_memory(max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None):
"""
Get the maximum memory available if nothing is passed, otherwise converts the string sizes to ints (in bytes).
"""
import psutil
if max_memory is None:
if not torch.cuda.is_available():
max_memory = {}
else:
# Make sure CUDA is initialized on each GPU to have the right memory info.
for i in range(torch.cuda.device_count()):
_ = torch.tensor([0], device=i)
max_memory = {i: torch.cuda.mem_get_info(i)[0] for i in range(torch.cuda.device_count())}
max_memory["cpu"] = psutil.virtual_memory().available
return max_memory
for key in max_memory:
if isinstance(max_memory[key], str):
max_memory[key] = convert_file_size_to_int(max_memory[key])
return max_memory
def clean_device_map(device_map: Dict[str, Union[int, str, torch.device]], module_name: str = ""):
"""
Cleans a device_map by grouping all submodules that go on the same device together.
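Example (illustrative):
```py
>>> clean_device_map({"block1": 0, "block2.linear1": 1, "block2.linear2": 1})
{'block1': 0, 'block2': 1}
```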
"""
# Get the values of the current module's keys and, if a single device is split across several keys, regroup them into one entry.
prefix = "" if module_name == "" else f"{module_name}."
values = [v for k, v in device_map.items() if k.startswith(prefix)]
if len(set(values)) == 1 and len(values) > 1:
for k in [k for k in device_map if k.startswith(prefix)]:
del device_map[k]
device_map[module_name] = values[0]
# Recurse over the children
children_modules = [k for k in device_map.keys() if k.startswith(module_name) and len(k) > len(module_name)]
idx = len(module_name.split(".")) + 1 if len(module_name) > 0 else 1
children_modules = set(".".join(k.split(".")[:idx]) for k in children_modules)
for child in children_modules:
clean_device_map(device_map, module_name=child)
return device_map
def load_offloaded_weights(model, index, offload_folder):
if index is None or len(index) == 0:
# Nothing to do
return
for param_name, metadata in index.items():
tensor_file = os.path.join(offload_folder, f"{param_name}.dat")
shape = tuple(metadata["shape"])
weight = np.memmap(tensor_file, dtype=metadata["dtype"], mode="r", shape=shape)
set_module_tensor_to_device(model, param_name, "cpu", value=torch.tensor(weight))
def infer_auto_device_map(
model: nn.Module,
max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
no_split_module_classes: Optional[List[str]] = None,
dtype: Optional[Union[str, torch.dtype]] = None,
):
"""
Compute a device map for a given model giving priority to GPUs, then offload on CPU and finally offload to disk,
such that:
- we don't exceed the memory available on any of the GPUs.
- if offloading to the CPU is needed, there is always room left on GPU 0 to put back the layer offloaded on the CPU that
has the largest size.
- if offloading to the CPU is needed, we don't exceed the RAM available on the CPU.
- if offloading to the disk is needed, there is always room left on the CPU to put back the layer offloaded on disk
that has the largest size.
<Tip>
All computation is done analyzing sizes and dtypes of the model parameters. As a result, the model can be on the
meta device (as it would if initialized within the `init_empty_weights` context manager).
</Tip>
Args:
model (`torch.nn.Module`): The model to analyze.
max_memory (`Dict`, *optional*):
A dictionary mapping device identifiers to their maximum memory. Will default to the maximum memory available if unset.
no_split_module_classes (`List[str]`, *optional*):
A list of layer class names that should never be split across device (for instance any layer that has a
residual connection).
dtype (`str` or `torch.dtype`, *optional*):
If provided, the weights will be converted to that type when loaded.
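Example (a toy illustrative sketch; the model and memory limit below are arbitrary):
```py
>>> model = nn.Sequential(nn.Linear(4, 4), nn.Linear(4, 2))
>>> infer_auto_device_map(model, max_memory={"cpu": "1KB"})
{'': 'cpu'}
```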
"""
# Get default / clean up max_memory
max_memory = get_max_memory(max_memory)
if no_split_module_classes is None:
no_split_module_classes = []
elif not isinstance(no_split_module_classes, (list, tuple)):
no_split_module_classes = [no_split_module_classes]
devices = list(max_memory.keys())
gpus = [device for device in devices if device != "cpu"]
if "disk" not in devices:
devices.append("disk")
# Devices that need to keep space for a potential offloaded layer.
main_devices = [gpus[0], "cpu"] if len(gpus) > 0 else ["cpu"]
module_sizes = compute_module_sizes(model, dtype=dtype)
tied_parameters = find_tied_parameters(model)
device_map = {}
current_device = 0
current_memory_used = 0
# Direct submodules and parameters
modules_to_treat = list(model.named_parameters(recurse=False)) + list(model.named_children())
# Initialize the maximum layer size, to know how much space to keep free in memory
max_layer_size, max_layer_names = get_max_layer_size(modules_to_treat, module_sizes, no_split_module_classes)
# Ready ? This is going to be a bit messy.
while len(modules_to_treat) > 0:
name, module = modules_to_treat.pop(0)
# The max size in the remaining layers may have changed since we took one out, so we may need to update it.
max_layer_names = [n for n in max_layer_names if not n.startswith(name)]
if len(max_layer_names) == 0:
max_layer_size, max_layer_names = get_max_layer_size(
[(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)],
module_sizes,
no_split_module_classes,
)
# Assess size needed
module_size = module_sizes[name]
tied_params = [v for k, v in tied_parameters.items() if name in k]
# We only handle tied parameters when the module is involved in exactly one tie; otherwise we ignore them.
tied_param = tied_params[0] if len(tied_params) == 1 else None
device = devices[current_device]
current_max_size = max_memory[device] if device != "disk" else None
# Reduce max size available by the largest layer.
if devices[current_device] in main_devices:
current_max_size = current_max_size - max_layer_size
# Case 1 -> We're too big!
if current_max_size is not None and current_memory_used + module_size > current_max_size:
# Split or not split?
modules_children = list(module.named_children())
if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes:
# -> no split, we go to the next device
current_device += 1
modules_to_treat = [(name, module)] + modules_to_treat
current_memory_used = 0
else:
# -> split, we replace the module studied by its children + parameters
modules_children = list(module.named_parameters(recurse=False)) + modules_children
modules_to_treat = [(f"{name}.{n}", v) for n, v in modules_children] + modules_to_treat
# Update the max layer size.
max_layer_size, max_layer_names = get_max_layer_size(
[(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)],
module_sizes,
no_split_module_classes,
)
# Case 2, it fits! We're not entirely out of the wood though, because we may have some tied parameters.
elif tied_param is not None:
# Determine the size occupied by this module + the module containing the tied parameter
tied_module_size = module_size
tied_module_index = [i for i, (n, _) in enumerate(modules_to_treat) if n in tied_param][0]
tied_module_name, tied_module = modules_to_treat[tied_module_index]
tied_module_size += module_sizes[tied_module_name] - module_sizes[tied_param]
if current_max_size is not None and current_memory_used + tied_module_size > current_max_size:
# Split or not split?
tied_module_children = list(tied_module.named_children())
if len(tied_module_children) == 0 or tied_module.__class__.__name__ in no_split_module_classes:
# If the tied module is not split, we go to the next device
current_device += 1
modules_to_treat = [(name, module)] + modules_to_treat
current_memory_used = 0
else:
# Otherwise, we replace the tied module by its children.
tied_module_children = list(tied_module.named_parameters(recurse=False)) + tied_module_children
tied_module_children = [(f"{tied_module_name}.{n}", v) for n, v in tied_module_children]
modules_to_treat = (
[(name, module)]
+ modules_to_treat[:tied_module_index]
+ tied_module_children
+ modules_to_treat[tied_module_index + 1 :]
)
# Update the max layer size.
max_layer_size, max_layer_names = get_max_layer_size(
[(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)],
module_sizes,
no_split_module_classes,
)
else:
# We really really fit!
current_memory_used += tied_module_size
device_map[name] = devices[current_device]
modules_to_treat.pop(tied_module_index)
device_map[tied_module_name] = devices[current_device]
else:
current_memory_used += module_size
device_map[name] = devices[current_device]
return clean_device_map(device_map)
def check_device_map(model: nn.Module, device_map: Dict[str, Union[int, str, torch.device]]):
"""
Checks that a device map covers everything in a given model.
Args:
model (`torch.nn.Module`): The model to check the device map against.
device_map (`Dict[str, Union[int, str, torch.device]]`): The device map to check.
"""
all_model_tensors = [name for name, _ in model.state_dict().items()]
for module_name in device_map.keys():
all_model_tensors = [name for name in all_model_tensors if not name.startswith(module_name)]
if len(all_model_tensors) > 0:
non_covered_params = ", ".join(all_model_tensors)
raise ValueError(
f"The device_map provided does not give any device for the following parameters: {non_covered_params}"
)
def load_checkpoint_in_model(
model: nn.Module,
checkpoint: Union[str, os.PathLike],
device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,
offload_folder: Optional[Union[str, os.PathLike]] = None,
dtype: Optional[Union[str, torch.dtype]] = None,
offload_state_dict: bool = False,
):
"""
Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are
loaded.
<Tip warning={true}>
Once loaded across devices, you still need to call [`dispatch_model`] on your model to make it able to run. To
group the checkpoint loading and dispatch in one single call, use [`load_checkpoint_and_dispatch`].
</Tip>
Args:
model (`torch.nn.Module`): The model in which we want to load a checkpoint.
checkpoint (`str` or `os.PathLike`):
The checkpoint to load. It can be:
- a path to a file containing a whole model state dict
- a path to a `.json` file containing the index to a sharded checkpoint
- a path to a folder containing a unique `.index.json` file and the shards of a checkpoint.
device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*):
A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer
name; once a given module name is inside, every submodule of it will be sent to the same device.
offload_folder (`str` or `os.PathLike`, *optional*):
If the `device_map` contains any value `"disk"`, the folder where we will offload weights.
dtype (`str` or `torch.dtype`, *optional*):
If provided, the weights will be converted to that type when loaded.
offload_state_dict (`bool`, *optional*, defaults to `False`):
If `True`, will temporarily offload the CPU state dict to the hard drive to avoid running out of CPU RAM if
the weight of the CPU state dict + the biggest shard does not fit.
"""
if offload_folder is None and device_map is not None and "disk" in device_map.values():
raise ValueError(
"At least one of the model submodule will be offloaded to disk, please pass along an `offload_folder`."
)
elif offload_folder is not None and device_map is not None and "disk" in device_map.values():
os.makedirs(offload_folder, exist_ok=True)
if isinstance(dtype, str):
# We accept "torch.float16" or just "float16"
dtype = dtype.replace("torch.", "")
dtype = getattr(torch, dtype)
checkpoint_files = None
index_filename = None
if os.path.isfile(checkpoint):
if str(checkpoint).endswith(".json"):
index_filename = checkpoint
else:
checkpoint_files = [checkpoint]
elif os.path.isdir(checkpoint):
potential_index = [f for f in os.listdir(checkpoint) if f.endswith(".index.json")]
if len(potential_index) == 0:
raise ValueError(f"{checkpoint} is not a folder containing a `.index.json` file.")
elif len(potential_index) == 1:
index_filename = os.path.join(checkpoint, potential_index[0])
else:
raise ValueError(f"{checkpoint} containing mote than one `.index.json` file, delete the irrelevant ones.")
else:
raise ValueError(
"`checkpoint` should be the path to a file containing a whole state dict, or the index of a sharded "
f"checkpoint, or a folder containing a sharded checkpoint, but got {checkpoint}."
)
if index_filename is not None:
checkpoint_folder = os.path.split(index_filename)[0]
with open(index_filename, "r") as f:
index = json.loads(f.read())
if "weight_map" in index:
index = index["weight_map"]
checkpoint_files = sorted(list(set(index.values())))
checkpoint_files = [os.path.join(checkpoint_folder, f) for f in checkpoint_files]
# Logic for missing/unexpected keys goes here.
offload_index = {}
if offload_state_dict:
state_dict_folder = tempfile.mkdtemp()
state_dict_index = {}
for checkpoint_file in checkpoint_files:
checkpoint = torch.load(checkpoint_file)
if device_map is None:
model.load_state_dict(checkpoint, strict=False)
else:
for param_name, param in checkpoint.items():
module_name = param_name
if dtype is not None:
param = param.to(dtype)
while len(module_name) > 0 and module_name not in device_map:
module_name = ".".join(module_name.split(".")[:-1])
if module_name == "" and "" not in device_map:
# TODO: group all errors and raise at the end.
raise ValueError(f"{param_name} doesn't have any device set.")
param_device = device_map[module_name]
if param_device == "disk":
set_module_tensor_to_device(model, param_name, "meta")
offload_weight(param, param_name, offload_folder, index=offload_index)
elif param_device == "cpu" and offload_state_dict:
set_module_tensor_to_device(model, param_name, "meta")
offload_weight(param, param_name, state_dict_folder, index=state_dict_index)
else:
set_module_tensor_to_device(model, param_name, param_device, value=param)
# Force Python to clean up.
del checkpoint
gc.collect()
save_offload_index(offload_index, offload_folder)
# Load back offloaded state dict on CPU
if offload_state_dict:
load_offloaded_weights(model, state_dict_index, state_dict_folder)
shutil.rmtree(state_dict_folder)
| [
"torch.cuda.mem_get_info",
"torch.device",
"torch.no_grad",
"torch.nn.Parameter",
"torch.cuda.device_count",
"torch.cuda.is_available",
"torch.tensor",
"torch.load"
] | 1.4.0 | techthiyanes/accelerate | 3d92caa24169b9606ba2a7dbf7b86dc0e009fba0 |
1.0 | import argparse
import random
import re
import numpy as np
import pandas as pd
import torch
def add_common_arg(parser):
def torch_device(arg):
if re.match('^(cuda(:[0-9]+)?|cpu)$', arg) is None:
raise argparse.ArgumentTypeError(
'Wrong device format: {}'.format(arg)
)
if arg != 'cpu':
split_device = arg.split(':')
if (not torch.cuda.is_available()) or \
(len(split_device) > 1 and
int(split_device[1]) >= torch.cuda.device_count()):
raise argparse.ArgumentTypeError(
'Wrong device: {} is not available'.format(arg)
)
return arg
# Base
parser.add_argument('--device',
type=torch_device, default='cuda',
help='Device to run: "cpu" or "cuda:<device number>"')
parser.add_argument('--seed',
type=int, default=0,
help='Seed')
return parser
def add_train_args(parser):
# Common
common_arg = parser.add_argument_group('Common')
add_common_arg(common_arg)
common_arg.add_argument('--train_load',
type=str, required=True,
help='Input data in csv format to train')
common_arg.add_argument('--val_load', type=str,
help="Input data in csv format to validation")
common_arg.add_argument('--model_save',
type=str, required=True, default='model.pt',
help='Where to save the model')
common_arg.add_argument('--save_frequency',
type=int, default=20,
help='How often to save the model')
common_arg.add_argument('--log_file',
type=str, required=False,
help='Where to save the log')
common_arg.add_argument('--config_save',
type=str, required=True,
help='Where to save the config')
common_arg.add_argument('--vocab_save',
type=str,
help='Where to save the vocab')
common_arg.add_argument('--vocab_load',
type=str,
help='Where to load the vocab; '
'otherwise it will be evaluated')
return parser
def add_sample_args(parser):
# Common
common_arg = parser.add_argument_group('Common')
add_common_arg(common_arg)
common_arg.add_argument('--model_load',
type=str, required=True,
help='Where to load the model')
common_arg.add_argument('--config_load',
type=str, required=True,
help='Where to load the config')
common_arg.add_argument('--vocab_load',
type=str, required=True,
help='Where to load the vocab')
common_arg.add_argument('--n_samples',
type=int, required=True,
help='Number of samples to sample')
common_arg.add_argument('--gen_save',
type=str, required=True,
help='Where to save the gen molecules')
common_arg.add_argument("--n_batch",
type=int, default=32,
help="Size of batch")
common_arg.add_argument("--max_len",
type=int, default=100,
help="Max of length of SMILES")
return parser
def read_smiles_csv(path):
return pd.read_csv(path, usecols=['SMILES'], squeeze=True).astype(str).tolist()
def set_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
random.seed(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
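# Illustrative usage sketch (not part of the original module; the file names below are
# hypothetical):
#
#     parser = add_train_args(argparse.ArgumentParser())
#     config = parser.parse_args(['--device', 'cpu',
#                                 '--train_load', 'train.csv',
#                                 '--model_save', 'model.pt',
#                                 '--config_save', 'config.json'])
#     set_seed(config.seed)
#     train_smiles = read_smiles_csv(config.train_load)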
| [
"torch.cuda.manual_seed_all",
"torch.cuda.device_count",
"torch.manual_seed",
"torch.cuda.is_available"
] | 1.0.5 | ksh981214/moses | 8a932ae578adb27c7f73ceb6f0de20acbe8196ff |
1.9 | import torch
def load_checkpoint(checkpoint_path: str, device: str):
"""Load checkpoint from file.
Args:
checkpoint_path (str): Path to checkpoint file
device (str): Device to load checkpoint on
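Example (illustrative; the checkpoint path and `model` are hypothetical):
    weights = load_checkpoint("checkpoints/model.pt", device="cpu")
    model.load_state_dict(weights)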
"""
if "cpu" in device:
checkpoint = torch.load(checkpoint_path, map_location=torch.device("cpu"))
else:
checkpoint = torch.load(checkpoint_path)
weights = checkpoint["model_state_dict"]
return weights
| [
"torch.device",
"torch.load"
] | 1.9.0 | mintusf/land_cover_tracking | e1c389729fdb628e4d34e0d427f43f6317eba4ee |
1.8 | import itertools
import json
from dataclasses import dataclass
import numpy as np
import pandas as pd
import seqeval.metrics as seqeval_metrics
import torch
from scipy.stats import pearsonr
from scipy.stats import spearmanr
from sklearn.metrics import f1_score
from sklearn.metrics import matthews_corrcoef
from typing import Dict
from typing import List
import jiant.shared.model_resolution as model_resolution
import jiant.tasks.lib.bucc2018 as bucc2018_lib
import jiant.tasks.lib.mlqa as mlqa_lib
import jiant.tasks.lib.tatoeba as tatoeba_lib
import jiant.tasks.lib.templates.squad_style.core as squad_style
import jiant.tasks.lib.templates.squad_style.utils as squad_style_utils
import jiant.tasks.retrieval as tasks_retrieval
from jiant.tasks.lib.templates import mlm as mlm_template
from jiant.utils.python.datastructures import ExtendedDataClassMixin
from jiant.utils.python.io import read_json
from jiant.utils.string_comparing import exact_match_score
from jiant.utils.string_comparing import string_f1_score
@dataclass
class Metrics(ExtendedDataClassMixin):
major: float
minor: Dict
class BaseEvaluation:
pass
class BaseAccumulator:
def update(self, batch_logits, batch_loss, batch, batch_metadata):
raise NotImplementedError()
def get_guids(self):
return None
def get_accumulated(self):
raise NotImplementedError()
class BaseEvaluationScheme:
def get_accumulator(self) -> BaseAccumulator:
raise NotImplementedError()
def get_labels_from_cache_and_examples(self, task, cache, examples):
# Depending on the task, labels may be more easily extracted from
# a cache or raw examples.
# Provide the EvaluationScheme with either, but delegate to another function
# using only one.
raise NotImplementedError()
def get_preds_from_accumulator(self, task, accumulator):
raise NotImplementedError()
def compute_metrics_from_accumulator(
self, task, accumulator: BaseAccumulator, tokenizer, labels
) -> Metrics:
raise NotImplementedError()
class ConcatenateLogitsAccumulator(BaseAccumulator):
def __init__(self):
self.logits_list = []
self.guid_list = []
def update(self, batch_logits, batch_loss, batch, batch_metadata):
self.logits_list.append(batch_logits)
batch_guid = batch_metadata.get("guid")
if batch_guid is not None:
self.guid_list.append(batch_guid)
def get_guids(self):
if self.guid_list:
return np.concatenate(self.guid_list)
else:
return None
def get_accumulated(self):
all_logits = np.concatenate(self.logits_list)
return all_logits
class ConcatenateLossAccumulator(BaseAccumulator):
def __init__(self):
self.loss_list = []
def update(self, batch_logits, batch_loss, batch, batch_metadata):
self.loss_list.append(batch_loss)
def get_accumulated(self):
all_loss = np.array(self.loss_list)
return all_loss
class ConcatenateStringListAccumulator(BaseAccumulator):
def __init__(self):
self.str_list = []
def update(self, batch_logits, batch_loss, batch, batch_metadata):
bs = len(batch_logits)
span_pred = batch_logits.argmax(axis=1)
pred_token_start, pred_token_end = span_pred[:, 0], span_pred[:, 1]
pred_char_start = batch.token_idx_to_char_idx_start.cpu().numpy()[
range(bs), pred_token_start
]
pred_char_end = batch.token_idx_to_char_idx_end.cpu().numpy()[range(bs), pred_token_end]
self.str_list.extend(
[
s[i1 : i2 + 1]
for i1, i2, s in zip(pred_char_start, pred_char_end, batch.selection_str)
]
)
def get_accumulated(self):
return self.str_list
class SpanPredictionF1andEMScheme(BaseEvaluationScheme):
def get_accumulator(self):
return ConcatenateStringListAccumulator()
def get_labels_from_cache_and_examples(self, task, cache, examples):
return [datum["data_row"].gt_span_str for datum in cache.iter_all()]
def get_preds_from_accumulator(self, task, accumulator):
return accumulator.get_accumulated()
@classmethod
def compute_metrics_from_preds_and_labels(cls, preds, labels):
em = sum([exact_match_score(s1, s2) for s1, s2 in zip(preds, labels)]) / len(labels)
f1 = sum([string_f1_score(s1, s2) for s1, s2 in zip(preds, labels)]) / len(labels)
scores = {"f1": f1, "em": em, "avg": (f1 + em) / 2}
return Metrics(major=scores["avg"], minor=scores)
def compute_metrics_from_accumulator(
self, task, accumulator: ConcatenateStringListAccumulator, tokenizer, labels: list
) -> Metrics:
preds = self.get_preds_from_accumulator(task=task, accumulator=accumulator)
return self.compute_metrics_from_preds_and_labels(preds=preds, labels=labels)
class RecordAccumulator(ConcatenateLogitsAccumulator):
def __init__(self):
super().__init__()
self.entity_strs = []
self.gold_label_list = []
def update(self, batch_logits, batch_loss, batch, batch_metadata):
super().update(batch_logits, batch_loss, batch, batch_metadata)
self.entity_strs.extend(batch.entity_str)
self.gold_label_list.extend(batch.label_set)
def get_accumulated(self):
return super().get_accumulated(), self.entity_strs
def get_gold_label_list(self):
return self.gold_label_list
class MLMPremaskedAccumulator(BaseAccumulator):
def __init__(self):
self.loss_list = []
self.logits_list = []
def update(self, batch_logits, batch_loss, batch, batch_metadata):
batch_size = len(batch)
# Select the tokens that we do MLM prediction on
masked_tokens_selector = (
batch.masked_lm_labels.cpu().numpy() != mlm_template.NON_MASKED_TOKEN_LABEL_ID
)
for i in range(batch_size):
# noinspection PyUnresolvedReferences
self.logits_list.append(batch_logits[i][masked_tokens_selector[i]])
self.loss_list.append(batch_loss)
def get_accumulated(self):
return self.loss_list, self.logits_list
class TatoebaAccumulator(BaseAccumulator):
def __init__(self):
self.embeddings_list = []
self.is_english_list = []
def update(self, batch_logits, batch_loss, batch, batch_metadata):
self.embeddings_list.append(batch_logits)
self.is_english_list.append(batch.is_english.cpu().numpy())
@classmethod
def get_guids(cls):
return None
def get_accumulated(self):
all_embeddings = np.concatenate(self.embeddings_list)
is_english_arr = np.concatenate(self.is_english_list).astype(bool)
return all_embeddings, is_english_arr
class Bucc2018Accumulator(BaseAccumulator):
def __init__(self):
self.embeddings_list = []
self.is_english_list = []
self.text_hash_list = []
self.guid_list = []
def update(self, batch_logits, batch_loss, batch, batch_metadata):
self.embeddings_list.append(batch_logits)
self.is_english_list.append(batch.is_english.cpu().numpy())
self.text_hash_list += batch.text_hash
self.guid_list += batch.guid
@classmethod
def get_guids(cls):
return None
def get_accumulated(self):
return {
"all_embeddings": np.concatenate(self.embeddings_list),
"is_english_arr": np.concatenate(self.is_english_list).astype(bool),
"text_hash_list": self.text_hash_list,
"guid_list": self.guid_list,
}
class BaseLogitsEvaluationScheme(BaseEvaluationScheme):
def get_accumulator(self):
return ConcatenateLogitsAccumulator()
def get_labels_from_cache_and_examples(self, task, cache, examples):
return get_label_ids_from_cache(cache=cache)
def get_preds_from_accumulator(self, task, accumulator):
raise NotImplementedError()
def compute_metrics_from_accumulator(
self, task, accumulator: ConcatenateLogitsAccumulator, tokenizer, labels: list
) -> Metrics:
preds = self.get_preds_from_accumulator(task=task, accumulator=accumulator)
return self.compute_metrics_from_preds_and_labels(preds=preds, labels=labels)
def compute_metrics_from_preds_and_labels(self, preds, labels):
raise NotImplementedError()
class SimpleAccuracyEvaluationScheme(BaseLogitsEvaluationScheme):
@classmethod
def get_preds_from_accumulator(cls, task, accumulator):
logits = accumulator.get_accumulated()
return np.argmax(logits, axis=1)
@classmethod
def compute_metrics_from_preds_and_labels(cls, preds, labels):
# noinspection PyUnresolvedReferences
acc = float((preds == labels).mean())
return Metrics(major=acc, minor={"acc": acc})
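# Illustrative sketch (hypothetical logits) of the accumulator -> predictions flow shared by
# the logits-based schemes below:
#
#     acc = ConcatenateLogitsAccumulator()
#     acc.update(np.array([[0.1, 0.9], [0.8, 0.2]]), batch_loss=None, batch=None, batch_metadata={})
#     SimpleAccuracyEvaluationScheme.get_preds_from_accumulator(task=None, accumulator=acc)
#     # -> array([1, 0])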
class MCTACOEvaluationScheme(BaseLogitsEvaluationScheme):
@classmethod
def get_preds_from_accumulator(cls, task, accumulator):
logits = accumulator.get_accumulated()
pred = np.argmax(logits, axis=1)
guid = accumulator.get_guids()
return guid, pred
@classmethod
def compute_metrics_from_accumulator(cls, task, accumulator, tokenizer, labels) -> Metrics:
guid, pred = cls.get_preds_from_accumulator(task=task, accumulator=accumulator)
em_ls = []
f1_ls = []
label_pred_by_question = {}
for one_guid, one_pred, one_label in zip(guid, pred, labels):
split, question_id, example_id = one_guid.split("-")
if question_id not in label_pred_by_question:
label_pred_by_question[question_id] = [], []
label_pred_by_question[question_id][0].append(one_label)
label_pred_by_question[question_id][1].append(one_pred)
em_ls = [
float(group_label == group_pred)
for group_label, group_pred in label_pred_by_question.values()
]
f1_ls = [
f1_score(y_true=group_label, y_pred=group_pred)
for group_label, group_pred in label_pred_by_question.values()
]
em = sum(em_ls) / len(em_ls)
f1 = sum(f1_ls) / len(f1_ls)
minor = {
"em": em,
"f1": f1,
"f1_em": (f1 + em) / 2,
}
metrics = Metrics(major=minor["f1_em"], minor=minor,)
return metrics
class MultiLabelAccAndF1EvaluationScheme(BaseLogitsEvaluationScheme):
def get_labels_from_cache_and_examples(self, task, cache, examples):
return get_multi_label_ids_from_cache(cache=cache)
def get_preds_from_accumulator(self, task, accumulator):
logits = accumulator.get_accumulated()
return (logits > 0.5).astype(int)
@classmethod
def compute_metrics_from_preds_and_labels(cls, preds, labels):
# noinspection PyUnresolvedReferences
acc = float((preds == labels).mean())
labels = np.array(labels)
minor = {
"acc": acc,
"f1_micro": f1_score(y_true=labels, y_pred=preds, average="micro"),
"acc_and_f1_micro": (acc + f1_score(y_true=labels, y_pred=preds, average="micro")) / 2,
}
return Metrics(major=minor["acc_and_f1_micro"], minor=minor)
class AccAndF1EvaluationScheme(BaseLogitsEvaluationScheme):
def get_preds_from_accumulator(self, task, accumulator):
logits = accumulator.get_accumulated()
return np.argmax(logits, axis=1)
@classmethod
def compute_metrics_from_preds_and_labels(cls, preds, labels):
# noinspection PyUnresolvedReferences
acc = float((preds == labels).mean())
labels = np.array(labels)
f1 = f1_score(y_true=labels, y_pred=preds)
minor = {
"acc": acc,
"f1": f1,
"acc_and_f1": (acc + f1) / 2,
}
return Metrics(major=minor["acc_and_f1"], minor=minor)
class MCCEvaluationScheme(BaseLogitsEvaluationScheme):
def get_preds_from_accumulator(self, task, accumulator):
logits = accumulator.get_accumulated()
return np.argmax(logits, axis=1)
@classmethod
def compute_metrics_from_preds_and_labels(cls, preds, labels):
mcc = matthews_corrcoef(labels, preds)
return Metrics(major=mcc, minor={"mcc": mcc})
class PearsonAndSpearmanEvaluationScheme(BaseLogitsEvaluationScheme):
def get_labels_from_cache_and_examples(self, task, cache, examples):
return get_label_vals_from_cache(cache=cache)
def get_preds_from_accumulator(self, task, accumulator):
logits = accumulator.get_accumulated()
return np.squeeze(logits, axis=-1)
@classmethod
def compute_metrics_from_preds_and_labels(cls, preds, labels):
pearson_corr = float(pearsonr(preds, labels)[0])
spearman_corr = float(spearmanr(preds, labels)[0])
minor = {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
return Metrics(major=minor["corr"], minor=minor)
class MultipleChoiceAccuracyEvaluationScheme(BaseLogitsEvaluationScheme):
def get_accumulator(self):
return ConcatenateLogitsAccumulator()
@classmethod
def get_labels_from_examples(cls, task, examples):
return get_multiple_choice_label_ids_from_examples(task=task, examples=examples)
def get_labels_from_cache_and_examples(self, task, cache, examples):
return get_multiple_choice_labels_from_cache(cache=cache)
def get_preds_from_accumulator(self, task, accumulator):
return SimpleAccuracyEvaluationScheme.get_preds_from_accumulator(
task=task, accumulator=accumulator,
)
def compute_metrics_from_preds_and_labels(self, preds, labels):
return SimpleAccuracyEvaluationScheme.compute_metrics_from_preds_and_labels(
preds=preds, labels=labels
)
class CommitmentBankEvaluationScheme(BaseLogitsEvaluationScheme):
def get_preds_from_accumulator(self, task, accumulator):
logits = accumulator.get_accumulated()
return np.argmax(logits, axis=1)
@classmethod
def compute_metrics_from_preds_and_labels(cls, preds, labels):
# noinspection PyUnresolvedReferences
acc = float((preds == labels).mean())
labels = np.array(labels)
f11 = f1_score(y_true=labels == 0, y_pred=preds == 0)
f12 = f1_score(y_true=labels == 1, y_pred=preds == 1)
f13 = f1_score(y_true=labels == 2, y_pred=preds == 2)
avg_f1 = mean(f11, f12, f13)
return Metrics(
major=mean(acc, avg_f1),
minor={"acc": acc, "avg_f1": avg_f1, "f11": f11, "f12": f12, "f13": f13},
)
class MultiRCEvaluationScheme(BaseEvaluationScheme):
def get_accumulator(self):
return ConcatenateLogitsAccumulator()
@classmethod
def get_labels_from_examples(cls, task, examples):
label_values = get_label_ids(examples=examples, task=task)
question_ids = np.array([example.question_id for example in examples])
assert len(label_values) == len(question_ids)
return [
{"label_values": lab, "question_ids": qid}
for lab, qid in zip(label_values, question_ids)
]
@classmethod
def get_labels_from_cache(cls, cache):
label_values = []
question_ids = []
for datum in cache.iter_all():
label_values.append(datum["data_row"].label_id)
question_ids.append(datum["data_row"].question_id)
label_values = np.array(label_values)
question_ids = np.array(question_ids)
assert len(label_values) == len(question_ids)
return [
{"label_values": lab, "question_ids": qid}
for lab, qid in zip(label_values, question_ids)
]
def get_labels_from_cache_and_examples(self, task, cache, examples):
return self.get_labels_from_examples(task=task, examples=examples)
def get_preds_from_accumulator(self, task, accumulator):
logits = accumulator.get_accumulated()
return np.argmax(logits, axis=-1)
def compute_metrics_from_accumulator(
self, task, accumulator: ConcatenateLogitsAccumulator, tokenizer, labels: list
) -> Metrics:
preds = self.get_preds_from_accumulator(task=task, accumulator=accumulator)
return self.compute_metrics_from_preds_and_labels(preds=preds, labels=labels,)
@classmethod
def compute_metrics_from_preds_and_labels(cls, preds, labels):
df = pd.DataFrame(labels)
assert "label_values" in df.columns
assert "question_ids" in df.columns
df["preds"] = preds
# noinspection PyUnresolvedReferences
exact_match = (
df.groupby("question_ids")
.apply(lambda _: (_["preds"] == _["label_values"]).all())
.mean()
)
exact_match = float(exact_match)
f1 = f1_score(y_true=df["label_values"], y_pred=df["preds"])
return Metrics(major=mean(exact_match, f1), minor={"em": exact_match, "f1": f1},)
@dataclass
class RecordLabelData:
passage_idx: int
question_idx: int
entity_str: str
answers_dict: Dict[str, str]
class ReCordEvaluationScheme(BaseEvaluationScheme):
def get_accumulator(self):
return RecordAccumulator()
@classmethod
def get_labels_from_examples(cls, examples):
return [
RecordLabelData(
passage_idx=example.passage_idx,
question_idx=example.question_idx,
entity_str=example.entity_str,
answers_dict=example.answers_dict,
)
for example in examples
]
@classmethod
def get_labels_from_cache_and_examples(cls, task, cache, examples):
return cls.get_labels_from_examples(examples=examples)
@classmethod
def get_preds_from_accumulator(cls, task, accumulator):
logits, entity_strs = accumulator.get_accumulated()
guid_list = accumulator.get_guids()
question_ids = []
for guid in guid_list:
question_ids.append(guid.split("-")[2])
# group logits by question id then reorder for submission
# need question id, logit, and entity_str
# for example, dict of question id to logit and entity_str
max_logits = {}
for logit, entity_str, question_id in zip(logits, entity_strs, question_ids):
if (question_id not in max_logits) or (max_logits[question_id]["logit"][1] < logit[1]):
max_logits[question_id] = {"logit": logit, "entity_str": entity_str}
# Convert labels of max_logits to prediction format
preds = []
for question_idx, logit_entity in max_logits.items():
preds.append({"idx": question_idx, "label": logit_entity["entity_str"]})
return preds
def compute_metrics_from_accumulator(
self, task, accumulator: RecordAccumulator, tokenizer, labels: List
) -> Metrics:
predictions_dict, metrics = self.compute_preds_and_metrics(task, accumulator)
return metrics
@classmethod
def compute_preds_and_metrics(cls, task, accumulator):
f1_ls = []
em_ls = []
predictions_dict = {}
preds = cls.get_preds_from_accumulator(task, accumulator)
guid_list = accumulator.get_guids()
gold_label_list_of_sets = accumulator.get_gold_label_list()
question_ids = []
for guid in guid_list:
question_ids.append(guid.split("-")[2])
# Reduce list of gold label sets to a gold label set per question_id
gold_labels = {}
for question_id, gold_label_set in zip(question_ids, gold_label_list_of_sets):
if question_id in gold_labels:
assert gold_label_set == gold_labels[question_id]
else:
gold_labels[question_id] = gold_label_set
for pred, gold_label_set in zip(preds, gold_labels.values()):
pred_ans = pred["label"]
# F1
f1 = cls.metric_max_over_ground_truths(string_f1_score, pred_ans, gold_label_set)
f1_ls.append(f1)
# EM
em = cls.metric_max_over_ground_truths(exact_match_score, pred_ans, gold_label_set)
em_ls.append(em)
em = sum(em_ls) / len(em_ls)
f1 = sum(f1_ls) / len(f1_ls)
minor = {
"em": em,
"f1": f1,
"f1_em": (f1 + em) / 2,
}
metrics = Metrics(major=minor["f1_em"], minor=minor,)
return predictions_dict, metrics
@classmethod
def metric_max_over_ground_truths(cls, metric_fn, prediction, ground_truths):
"""Compute max metric between prediction and each ground truth.
From official ReCoRD eval script
"""
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
class CCGEvaluationScheme(BaseEvaluationScheme):
def get_accumulator(self):
return ConcatenateLogitsAccumulator()
@classmethod
def get_label_ids_from_cache(cls, cache):
return [
{"label_ids": datum["data_row"].label_ids, "label_mask": datum["data_row"].label_mask}
for datum in cache.iter_all()
]
@classmethod
def get_labels_from_cache_and_examples(cls, task, cache, examples):
return cls.get_label_ids_from_cache(cache=cache)
def get_preds_from_accumulator(self, task, accumulator):
logits = accumulator.get_accumulated()
return np.argmax(logits, axis=-1)
def compute_metrics_from_accumulator(
self, task, accumulator: ConcatenateLogitsAccumulator, tokenizer, labels: list
) -> Metrics:
preds = self.get_preds_from_accumulator(task=task, accumulator=accumulator)
return self.compute_metrics_from_preds_and_labels(preds=preds, labels=labels,)
@classmethod
def compute_metrics_from_preds_and_labels(cls, preds, labels):
label_ids = np.stack([row["label_ids"] for row in labels])
label_mask = np.stack([row["label_mask"] for row in labels])
# Account for smart-truncate
assert (label_mask[:, preds.shape[-1] :] == 0).all()
label_ids = label_ids[:, : preds.shape[-1]]
label_mask = label_mask[:, : preds.shape[-1]]
bool_mask = label_mask.reshape(-1).astype(bool)
flat_preds = preds.reshape(-1)[bool_mask]
flat_labels = label_ids.reshape(-1)[bool_mask]
return cls.compute_metrics_from_flat_preds_and_labels(
flat_preds=flat_preds, flat_labels=flat_labels,
)
@classmethod
def compute_metrics_from_flat_preds_and_labels(cls, flat_preds, flat_labels):
return SimpleAccuracyEvaluationScheme.compute_metrics_from_preds_and_labels(
preds=flat_preds, labels=flat_labels,
)
class F1TaggingEvaluationScheme(BaseEvaluationScheme):
def get_accumulator(self):
return ConcatenateLogitsAccumulator()
@classmethod
def get_labels_from_cache_and_examples(cls, task, cache, examples):
labels = []
for datum in cache.iter_all():
label_mask = datum["data_row"].label_mask.astype(bool)
pos_list = [
task.ID_TO_LABEL[pos_id] for pos_id in datum["data_row"].label_ids[label_mask]
]
label = {
"pos_list": pos_list,
"label_mask": label_mask,
}
labels.append(label)
assert len(pos_list) == label_mask.sum()
return labels
def get_preds_from_accumulator(self, task, accumulator):
logits = accumulator.get_accumulated()
return np.argmax(logits, axis=-1)
def compute_metrics_from_accumulator(
self, task, accumulator: ConcatenateLogitsAccumulator, tokenizer, labels: list
) -> Metrics:
preds = self.get_preds_from_accumulator(task=task, accumulator=accumulator)
return self.compute_metrics_from_preds_and_labels(task=task, preds=preds, labels=labels,)
@classmethod
def compute_metrics_from_preds_and_labels(cls, task, preds, labels):
label_mask = np.stack([row["label_mask"] for row in labels])
# Account for smart-truncate
assert (label_mask[:, preds.shape[-1] :] == 0).all()
label_mask = label_mask[:, : preds.shape[-1]]
labels_for_eval = [label["pos_list"] for label in labels]
preds_for_eval = []
assert len(labels) == preds.shape[0]
for i in range(len(labels)):
relevant_preds = preds[i][label_mask[i]]
relevant_preds_pos = [task.ID_TO_LABEL[pos_id] for pos_id in relevant_preds]
preds_for_eval.append(relevant_preds_pos)
minor = {
"precision": seqeval_metrics.precision_score(labels_for_eval, preds_for_eval),
"recall": seqeval_metrics.recall_score(labels_for_eval, preds_for_eval),
"f1": seqeval_metrics.f1_score(labels_for_eval, preds_for_eval),
}
return Metrics(major=minor["f1"], minor=minor,)
class SQuADEvaluationScheme(BaseEvaluationScheme):
@classmethod
def get_accumulator(cls) -> BaseAccumulator:
return ConcatenateLogitsAccumulator()
@classmethod
def get_labels_from_cache(cls, cache):
return [cls.get_label_from_data_row(datum["data_row"]) for datum in cache.iter_all()]
@classmethod
def get_labels_from_cache_and_examples(cls, task, cache, examples):
return cls.get_labels_from_cache(cache=cache)
def get_preds_from_accumulator(self, task, accumulator):
raise NotImplementedError("Currently can't be done without access to dataset")
def compute_metrics_from_accumulator(
self, task, accumulator: BaseAccumulator, tokenizer, labels
) -> Metrics:
logits = accumulator.get_accumulated()
results, predictions = squad_style.compute_predictions_logits_v3(
data_rows=labels,
logits=logits,
n_best_size=task.n_best_size,
max_answer_length=task.max_answer_length,
do_lower_case=model_resolution.resolve_is_lower_case(tokenizer),
version_2_with_negative=task.version_2_with_negative,
null_score_diff_threshold=task.null_score_diff_threshold,
tokenizer=tokenizer,
)
if task.version_2_with_negative:
# Return the score after the best thresholds for answering has been selected
return Metrics(major=(results["best_f1"] + results["best_exact"]) / 2, minor=results)
else:
return Metrics(major=(results["f1"] + results["exact"]) / 2, minor=results)
@classmethod
def get_label_from_data_row(cls, data_row):
return squad_style.PartialDataRow.from_data_row(data_row)
class XlingQAEvaluationScheme(BaseEvaluationScheme):
@classmethod
def get_accumulator(cls) -> BaseAccumulator:
return ConcatenateLogitsAccumulator()
@classmethod
def get_labels_from_cache(cls, cache):
return [cls.get_label_from_data_row(datum["data_row"]) for datum in cache.iter_all()]
@classmethod
def get_labels_from_cache_and_examples(cls, task, cache, examples):
return cls.get_labels_from_cache(cache=cache)
def get_preds_from_accumulator(self, task, accumulator):
raise NotImplementedError("Currently can't be done without access to dataset")
def compute_metrics_from_accumulator(
self, task, accumulator: BaseAccumulator, tokenizer, labels
) -> Metrics:
logits = accumulator.get_accumulated()
assert isinstance(task, (tasks_retrieval.TyDiQATask, tasks_retrieval.XquadTask))
lang = task.language
results, predictions = squad_style.compute_predictions_logits_v3(
data_rows=labels,
logits=logits,
n_best_size=task.n_best_size,
max_answer_length=task.max_answer_length,
do_lower_case=model_resolution.resolve_is_lower_case(tokenizer),
version_2_with_negative=task.version_2_with_negative,
null_score_diff_threshold=task.null_score_diff_threshold,
skip_get_final_text=(lang == "zh"),
tokenizer=tokenizer,
)
return Metrics(major=(results["f1"] + results["exact"]) / 2, minor=results,)
@classmethod
def get_label_from_data_row(cls, data_row):
return squad_style.PartialDataRow.from_data_row(data_row)
class MLQAEvaluationScheme(SQuADEvaluationScheme):
def get_preds_from_accumulator(self, task, accumulator):
raise NotImplementedError("Too hard for now, too much handled in one giant lib")
def compute_metrics_from_accumulator(
self, task, accumulator: BaseAccumulator, tokenizer, labels
) -> Metrics:
# Todo: Fix val labels cache
# This is a quick hack
logits = accumulator.get_accumulated()
partial_examples = squad_style.data_rows_to_partial_examples(data_rows=labels)
all_pred_results = squad_style.logits_to_pred_results_list(logits)
assert task.context_language == task.question_language
lang = task.context_language
predictions = squad_style_utils.compute_predictions_logits_v2(
partial_examples=partial_examples,
all_results=all_pred_results,
n_best_size=task.n_best_size,
max_answer_length=task.max_answer_length,
do_lower_case=model_resolution.resolve_is_lower_case(tokenizer),
version_2_with_negative=task.version_2_with_negative,
null_score_diff_threshold=task.null_score_diff_threshold,
tokenizer=tokenizer,
skip_get_final_text=(lang == "zh"),
verbose=True,
)
dataset = read_json(task.val_path)["data"]
results = mlqa_lib.evaluate(dataset=dataset, predictions=predictions, lang=lang,)
return Metrics(major=(results["f1"] + results["exact_match"]) / 2, minor=results,)
class MLMEvaluationScheme(BaseEvaluationScheme):
@classmethod
def get_accumulator(cls) -> BaseAccumulator:
return ConcatenateLossAccumulator()
def get_labels_from_cache_and_examples(self, task, cache, examples):
# This is a dummy function. There are no external labels.
return [None]
def get_preds_from_accumulator(self, task, accumulator):
raise NotImplementedError("Not possible")
def compute_metrics_from_accumulator(
self, task, accumulator: BaseAccumulator, tokenizer, labels
) -> Metrics:
loss_list = accumulator.get_accumulated()
average_loss = mean(loss_list)
perplexity = np.exp(average_loss)
return Metrics(
# Major = negative perplexity
major=-perplexity,
minor={"perplexity": perplexity},
)
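# For example (illustrative): an average masked-LM loss of 2.0 gives a perplexity of
# exp(2.0) ~= 7.39, reported as major=-7.39 (negating keeps "larger major = better").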
class MLMPremaskedEvaluationScheme(MLMEvaluationScheme):
@classmethod
def get_accumulator(cls) -> BaseAccumulator:
return MLMPremaskedAccumulator()
@classmethod
def get_labels_from_cache_and_examples(cls, task, cache, examples):
labels = []
for datum in cache.iter_all():
masked_lm_labels = datum["data_row"].masked_lm_labels
labels.append(
masked_lm_labels[masked_lm_labels != mlm_template.NON_MASKED_TOKEN_LABEL_ID]
)
return labels
def get_preds_from_accumulator(self, task, accumulator):
_, preds = accumulator.get_accumulated()
return preds
def compute_metrics_from_accumulator(
self, task, accumulator: BaseAccumulator, tokenizer, labels
) -> Metrics:
loss_list, _ = accumulator.get_accumulated()
average_loss = mean(loss_list)
perplexity = np.exp(average_loss)
return Metrics(
# Major = negative perplexity
major=-perplexity,
minor={"perplexity": perplexity},
)
class TatoebaEvaluationScheme(BaseEvaluationScheme):
def get_accumulator(self):
return TatoebaAccumulator()
def get_labels_from_cache_and_examples(self, task, cache, examples):
return task.get_val_labels()
def get_preds_from_accumulator(self, task, accumulator):
all_embeddings, is_english_arr = accumulator.get_accumulated()
other_lang_embeddings = all_embeddings[~is_english_arr]
eng_embeddings = all_embeddings[is_english_arr]
predictions = tatoeba_lib.similarity_search(
x=other_lang_embeddings,
y=eng_embeddings,
dim=other_lang_embeddings.shape[-1],
normalize=True,
).flatten()
return predictions
def compute_metrics_from_accumulator(
self, task, accumulator: ConcatenateLogitsAccumulator, tokenizer, labels: list
) -> Metrics:
preds = self.get_preds_from_accumulator(task=task, accumulator=accumulator)
return self.compute_metrics_from_preds_and_labels(preds=preds, labels=labels,)
@classmethod
def compute_metrics_from_preds_and_labels(cls, preds, labels):
# noinspection PyUnresolvedReferences
acc = (preds == labels).mean()
return Metrics(major=acc, minor={"acc": acc})
class Bucc2018EvaluationScheme(BaseEvaluationScheme):
def get_accumulator(self):
return Bucc2018Accumulator()
def get_labels_from_cache_and_examples(self, task, cache, examples):
return task.get_val_labels()
def get_preds_from_accumulator(self, task, accumulator, threshold=0):
accumulated = accumulator.get_accumulated()
is_english_arr = accumulated["is_english_arr"]
all_embeddings = accumulated["all_embeddings"]
guids = accumulated["guid_list"]
text_hash_list = accumulated["text_hash_list"]
other_lang_embeddings = all_embeddings[~is_english_arr]
eng_embeddings = all_embeddings[is_english_arr]
english_guids = [x.split("-", 1)[1] for x in np.array(guids)[is_english_arr]]
other_guids = [x.split("-", 1)[1] for x in np.array(guids)[~is_english_arr]]
n = len(is_english_arr)
src_inds, _ = bucc2018_lib.get_unique_lines(
[text_hash_list[i] for i in np.arange(n) if not is_english_arr[i]]
)
trg_inds, _ = bucc2018_lib.get_unique_lines(
[text_hash_list[i] for i in np.arange(n) if is_english_arr[i]]
)
src_ids_map = bucc2018_lib.create_ids_map(src_inds, other_guids)
trg_ids_map = bucc2018_lib.create_ids_map(trg_inds, english_guids)
result = bucc2018_lib.mine_bitext(
x=other_lang_embeddings,
y=eng_embeddings,
src_inds=src_inds,
trg_inds=trg_inds,
threshold=threshold,
use_gpu=torch.cuda.is_available(),
)
# Note: Setting thresholds only available in test script
candidates2score = {}
for score, src_idx, trg_idx in result:
for src_key, trg_key in itertools.product(src_ids_map[src_idx], trg_ids_map[trg_idx]):
candidates2score[src_key, trg_key] = score
return candidates2score
def compute_metrics_from_accumulator(
self, task, accumulator: ConcatenateLogitsAccumulator, tokenizer, labels: list
) -> Metrics:
preds = self.get_preds_from_accumulator(task=task, accumulator=accumulator)
return self.compute_metrics_from_preds_and_labels(preds=preds, labels=labels,)
@classmethod
def compute_metrics_from_preds_and_labels(cls, preds, labels):
labels = [tuple(x.split("\t")) for x in labels]
result = bucc2018_lib.bucc_eval(preds, gold=labels, threshold=None)
return Metrics(major=result["F1"], minor=result,)
def get_evaluation_scheme_for_task(task) -> BaseEvaluationScheme:
# TODO: move logic to task? (issue #1182)
if isinstance(
task,
(
tasks_retrieval.AdversarialNliTask,
tasks_retrieval.AbductiveNliTask,
tasks_retrieval.AcceptabilityDefinitenessTask,
tasks_retrieval.AcceptabilityCoordTask,
tasks_retrieval.AcceptabilityEOSTask,
tasks_retrieval.AcceptabilityWHwordsTask,
tasks_retrieval.BoolQTask,
tasks_retrieval.CopaTask,
tasks_retrieval.FeverNliTask,
tasks_retrieval.MnliTask,
tasks_retrieval.MnliLinearizedAMRTask,
tasks_retrieval.MnliAMRTask,
tasks_retrieval.PawsXTask,
tasks_retrieval.QnliTask,
tasks_retrieval.RaceTask,
tasks_retrieval.RteTask,
tasks_retrieval.SciTailTask,
tasks_retrieval.SentEvalBigramShiftTask,
tasks_retrieval.SentEvalCoordinationInversionTask,
tasks_retrieval.SentEvalObjNumberTask,
tasks_retrieval.SentEvalOddManOutTask,
tasks_retrieval.SentEvalPastPresentTask,
tasks_retrieval.SentEvalSentenceLengthTask,
tasks_retrieval.SentEvalSubjNumberTask,
tasks_retrieval.SentEvalTopConstituentsTask,
tasks_retrieval.SentEvalTreeDepthTask,
tasks_retrieval.SentEvalWordContentTask,
tasks_retrieval.SnliTask,
tasks_retrieval.SstTask,
tasks_retrieval.WiCTask,
tasks_retrieval.WnliTask,
tasks_retrieval.WSCTask,
tasks_retrieval.XnliTask,
tasks_retrieval.MCScriptTask,
tasks_retrieval.ArctTask,
tasks_retrieval.PiqaTask,
),
):
return SimpleAccuracyEvaluationScheme()
elif isinstance(task, tasks_retrieval.MCTACOTask):
return MCTACOEvaluationScheme()
elif isinstance(task, tasks_retrieval.CCGTask):
return CCGEvaluationScheme()
elif isinstance(task, tasks_retrieval.CommitmentBankTask):
return CommitmentBankEvaluationScheme()
elif isinstance(task, tasks_retrieval.ColaTask):
return MCCEvaluationScheme()
elif isinstance(
task,
(
tasks_retrieval.ArcEasyTask,
tasks_retrieval.ArcChallengeTask,
tasks_retrieval.CommonsenseQATask,
tasks_retrieval.CosmosQATask,
tasks_retrieval.SWAGTask,
tasks_retrieval.HellaSwagTask,
tasks_retrieval.MutualTask,
tasks_retrieval.MutualPlusTask,
tasks_retrieval.QuailTask,
tasks_retrieval.SocialIQATask,
tasks_retrieval.WinograndeTask,
tasks_retrieval.MCTestTask,
),
):
return MultipleChoiceAccuracyEvaluationScheme()
elif isinstance(task, (tasks_retrieval.MrpcTask, tasks_retrieval.QqpTask)):
return AccAndF1EvaluationScheme()
elif isinstance(
task,
(
tasks_retrieval.Spr1Task,
tasks_retrieval.Spr2Task,
tasks_retrieval.SemevalTask,
tasks_retrieval.SrlTask,
tasks_retrieval.NerTask,
tasks_retrieval.CorefTask,
tasks_retrieval.DprTask,
tasks_retrieval.DepTask,
tasks_retrieval.PosTask,
tasks_retrieval.NonterminalTask,
),
):
return MultiLabelAccAndF1EvaluationScheme()
elif isinstance(task, tasks_retrieval.ReCoRDTask):
return ReCordEvaluationScheme()
elif isinstance(
task,
(
tasks_retrieval.SquadTask,
tasks_retrieval.RopesTask,
tasks_retrieval.QuorefTask,
tasks_retrieval.NewsQATask,
tasks_retrieval.MrqaNaturalQuestionsTask,
),
):
return SQuADEvaluationScheme()
elif isinstance(task, (tasks_retrieval.TyDiQATask, tasks_retrieval.XquadTask)):
return XlingQAEvaluationScheme()
elif isinstance(task, tasks_retrieval.MlqaTask):
return MLQAEvaluationScheme()
elif isinstance(task, tasks_retrieval.MultiRCTask):
return MultiRCEvaluationScheme()
elif isinstance(task, tasks_retrieval.StsbTask):
return PearsonAndSpearmanEvaluationScheme()
elif isinstance(task, tasks_retrieval.MLMSimpleTask):
return MLMEvaluationScheme()
elif isinstance(task, (tasks_retrieval.MLMPremaskedTask, tasks_retrieval.MLMPretokenizedTask)):
return MLMPremaskedEvaluationScheme()
elif isinstance(task, (tasks_retrieval.QAMRTask, tasks_retrieval.QASRLTask)):
return SpanPredictionF1andEMScheme()
elif isinstance(task, (tasks_retrieval.UdposTask, tasks_retrieval.PanxTask)):
return F1TaggingEvaluationScheme()
elif isinstance(task, tasks_retrieval.Bucc2018Task):
return Bucc2018EvaluationScheme()
elif isinstance(task, tasks_retrieval.TatoebaTask):
return TatoebaEvaluationScheme()
else:
raise KeyError(task)
def get_label_ids(task, examples):
return np.array([task.LABEL_TO_ID[example.label] for example in examples])
def get_label_ids_from_data_row(data_row):
return data_row.label_ids
def get_multi_label_ids_from_cache(cache):
return np.array(
[get_label_ids_from_data_row(data_row=datum["data_row"]) for datum in cache.iter_all()]
)
def get_label_id_from_data_row(data_row):
return data_row.label_id
def get_label_ids_from_cache(cache):
return np.array(
[get_label_id_from_data_row(data_row=datum["data_row"]) for datum in cache.iter_all()]
)
def get_label_vals_from_cache(cache):
return np.array(
[get_label_val_from_data_row(data_row=datum["data_row"]) for datum in cache.iter_all()]
)
def get_label_val_from_data_row(data_row):
return data_row.label
def get_multiple_choice_label_ids_from_examples(task, examples):
return np.array([task.CHOICE_BIMAP.a[example.label] for example in examples])
def get_multiple_choice_label_id_from_data_row(data_row):
return data_row.label_id
def get_multiple_choice_labels_from_cache(cache):
return np.array(
[
get_multiple_choice_label_id_from_data_row(data_row=datum["data_row"])
for datum in cache.iter_all()
]
)
def mean(*args) -> float:
return float(np.mean(args))
def write_metrics(results, output_path, verbose=True):
results_to_write = {}
if "loss" in results:
results_to_write["loss"] = results["loss"]
if "metrics" in results:
results_to_write["metrics"] = results["metrics"].to_dict()
assert results_to_write
metrics_str = json.dumps(results_to_write, indent=2)
if verbose:
print(metrics_str)
with open(output_path, "w") as f:
f.write(metrics_str)
| [
"torch.cuda.is_available"
] | 1.8.1 | mfk3138/jiant | 6e67ff1ecb1bb98533c1019a86af4ad2c04c6a64 |
1.4 | import torch
import torch.nn as nn
from ..attack import Attack
class MIFGSM(Attack):
r"""
MI-FGSM in the paper 'Boosting Adversarial Attacks with Momentum'
[https://arxiv.org/abs/1710.06081]
Distance Measure : Linf
Arguments:
model (nn.Module): model to attack.
        eps (float): maximum perturbation. (DEFAULT: 8/255)
decay (float): momentum factor. (DEFAULT: 1.0)
steps (int): number of iterations. (DEFAULT: 5)
Shape:
        - images: :math:`(N, C, H, W)` where `N = batch size`, `C = number of channels`, `H = height` and `W = width`. Pixel values must be in the range [0, 1].
        - labels: :math:`(N)` where each value :math:`y_i` satisfies :math:`0 \leq y_i <` `number of labels`.
- output: :math:`(N, C, H, W)`.
Examples::
>>> attack = torchattacks.MIFGSM(model, eps=8/255, steps=5, decay=1.0)
>>> adv_images = attack(images, labels)
"""
def __init__(self, model, eps=8/255, steps=5, decay=1.0):
super(MIFGSM, self).__init__("MIFGSM", model)
self.eps = eps
self.steps = steps
self.decay = decay
self.alpha = self.eps / self.steps
def forward(self, images, labels):
r"""
Overridden.
"""
images = images.clone().detach().to(self.device)
labels = labels.clone().detach().to(self.device)
labels = self._transform_label(images, labels)
loss = nn.CrossEntropyLoss()
momentum = torch.zeros_like(images).detach().to(self.device)
adv_images = images.clone().detach()
for i in range(self.steps):
adv_images.requires_grad = True
outputs = self.model(adv_images)
cost = self._targeted*loss(outputs, labels)
grad = torch.autograd.grad(cost, adv_images,
retain_graph=False, create_graph=False)[0]
grad_norm = torch.norm(nn.Flatten()(grad), p=1, dim=1)
grad = grad / grad_norm.view([-1]+[1]*(len(grad.shape)-1))
grad = grad + momentum*self.decay
momentum = grad
adv_images = adv_images.detach() - self.alpha*grad.sign()
delta = torch.clamp(adv_images - images, min=-self.eps, max=self.eps)
adv_images = torch.clamp(images + delta, min=0, max=1).detach()
return adv_images
| [
"torch.clamp",
"torch.autograd.grad",
"torch.zeros_like",
"torch.nn.CrossEntropyLoss",
"torch.nn.Flatten"
] | 1.4.0 | spokV/adversarial-attacks-pytorch | 2fa41799e38de2e318f4ba5f7815fba6610fc9af |
1.4 | # MIT License
#
# Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from collections.abc import Iterable
import numpy as np
import torch
from smarts.core.sensors import VehicleObservation
from ultra.baselines.common.social_vehicle_extraction import get_social_vehicles
from ultra.scenarios.common.visualization import draw_intersection
from ultra.utils.common import (
get_closest_waypoint,
normalize_im,
rotate2d_vector,
to_2d_action,
to_3d_action,
)
identity_func = lambda x, *args, **kwargs: x
class StatePreprocessor:
def __init__(self, preprocess_state_func, convert_action_func, state_description):
self.preprocess_state_func = preprocess_state_func
self.convert_action_func = convert_action_func
self.state_description = state_description
def __call__(
self,
state,
social_capacity,
observation_num_lookahead,
social_vehicle_config,
prev_action,
draw=False,
normalize=False,
unsqueeze=False,
device=None,
):
return self.preprocess_state_func(
state,
self.state_description,
observation_num_lookahead=observation_num_lookahead,
social_capacity=social_capacity,
normalize=normalize,
unsqueeze=unsqueeze,
device=device,
convert_action_func=self.convert_action_func,
social_vehicle_config=social_vehicle_config,
prev_action=prev_action,
draw=draw,
)
def preprocess_state(
state,
state_description,
convert_action_func,
observation_num_lookahead,
social_capacity,
social_vehicle_config,
prev_action,
normalize=False,
unsqueeze=False,
device=None,
draw=False,
):
state = state.copy()
images = {}
for k in state_description["images"]:
image = torch.from_numpy(state[k])
image = image.unsqueeze(0) if unsqueeze else image
image = image.to(device) if device else image
image = normalize_im(image) if normalize else image
images[k] = image
if "action" in state:
state["action"] = convert_action_func(state["action"])
# -------------------------------------
# filter lookaheads from goal_path
_, lookahead_wps = get_closest_waypoint(
num_lookahead=observation_num_lookahead,
goal_path=state["goal_path"],
ego_position=state["ego_position"],
ego_heading=state["heading"],
)
state["waypoints_lookahead"] = np.hstack(lookahead_wps)
# -------------------------------------
# keep prev_action
state["action"] = prev_action
# -------------------------------------
# normalize states and concat
normalized = [
_normalize(key, state[key]) for key in state_description["low_dim_states"]
]
low_dim_states = [
val if isinstance(val, Iterable) else np.asarray([val]).astype(np.float32)
for val in normalized
]
low_dim_states = torch.cat(
[torch.from_numpy(e).float() for e in low_dim_states], dim=-1
)
low_dim_states = low_dim_states.unsqueeze(0) if unsqueeze else low_dim_states
low_dim_states = low_dim_states.to(device) if device else low_dim_states
# -------------------------------------
# apply social vehicle encoder
# only process if state is not encoded already
state["social_vehicles"] = (
get_social_vehicles(
ego_vehicle_pos=state["ego_position"],
ego_vehicle_heading=state["heading"],
neighborhood_vehicles=state["social_vehicles"],
social_vehicle_config=social_vehicle_config,
waypoint_paths=state["waypoint_paths"],
)
if social_capacity > 0
else []
)
    # skip the social vehicle encoding when the social feature dimension is 0 (i.e. the social capacity is 0)
social_vehicle_dimension = state_description["social_vehicles"]
social_vehicles = torch.empty(0, 0)
if social_vehicle_dimension:
social_vehicles = torch.from_numpy(np.asarray(state["social_vehicles"])).float()
social_vehicles = social_vehicles.reshape((-1, social_vehicle_dimension))
social_vehicles = social_vehicles.unsqueeze(0) if unsqueeze else social_vehicles
social_vehicles = social_vehicles.to(device) if device else social_vehicles
out = {
"images": images,
"low_dim_states": low_dim_states,
"social_vehicles": social_vehicles,
}
return out
def get_state_description(
social_vehicle_config, observation_waypoints_lookahead, action_size
):
return {
"images": {},
"low_dim_states": {
"speed": 1,
"distance_from_center": 1,
"steering": 1,
"angle_error": 1,
"relative_goal_position": 2,
"action": int(action_size), # 2
"waypoints_lookahead": 2 * int(observation_waypoints_lookahead),
"road_speed": 1,
},
"social_vehicles": int(social_vehicle_config["num_social_features"])
if int(social_vehicle_config["social_capacity"]) > 0
else 0,
}
def _normalize(key, val):
ref = {
"speed": 30.0,
"distance_from_center": 1.0,
"steering": 3.14, # radians
"angle_error": 3.14, # radians
"relative_goal_position": 100.0,
"action": 1.0, # 2
"waypoints_lookahead": 10.0,
"road_speed": 30.0,
}
if key not in ref:
return val
return val / ref[key]
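# Worked example (illustrative): _normalize("speed", 15.0) returns 0.5, while a key
# that is not listed in `ref` is returned unchanged.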
# all_waypoints = [
# [linked_wp.pos[0], linked_wp.pos[1]] for linked_wp in state["goal_path"]
# ]
# if draw:
# draw_intersection(
# ego=state["ego_position"],
# goal_path=state["goal_path"],
# all_waypoints=all_waypoints,
# step=step,
# goal=state["goal"],
# start=state["start"],
# lookaheads=state["waypoints_lookahead"],
# social_vehicle_states=state["social_vehicles"],
# finished_vehicles=[],
# )
| [
"torch.empty",
"torch.from_numpy"
] | 1.4.0 | jerryzhao173985/SMARTS | c8473049b4b52ad3eb29f7c7eb93753c2d70df55 |
1.6 | import pytest
import torch
import torch.nn.functional as F
from d3rlpy.models.encoders import DefaultEncoderFactory
from d3rlpy.models.torch.imitators import (
ConditionalVAE,
DeterministicRegressor,
DiscreteImitator,
ProbablisticRegressor,
)
from .model_test import DummyEncoder, check_parameter_updates
@pytest.mark.parametrize("feature_size", [100])
@pytest.mark.parametrize("action_size", [2])
@pytest.mark.parametrize("latent_size", [32])
@pytest.mark.parametrize("beta", [0.5])
@pytest.mark.parametrize("batch_size", [32])
@pytest.mark.parametrize("n", [100])
def test_conditional_vae(
feature_size, action_size, latent_size, beta, batch_size, n
):
encoder_encoder = DummyEncoder(feature_size, action_size, True)
decoder_encoder = DummyEncoder(feature_size, latent_size, True)
vae = ConditionalVAE(encoder_encoder, decoder_encoder, beta)
# check output shape
x = torch.rand(batch_size, feature_size)
action = torch.rand(batch_size, action_size)
y = vae(x, action)
assert y.shape == (batch_size, action_size)
# check encode
dist = vae.encode(x, action)
assert isinstance(dist, torch.distributions.Normal)
assert dist.mean.shape == (batch_size, latent_size)
# check decode
latent = torch.rand(batch_size, latent_size)
y = vae.decode(x, latent)
assert y.shape == (batch_size, action_size)
# check sample
y = vae.sample(x)
assert y.shape == (batch_size, action_size)
# check sample_n
y = vae.sample_n(x, n)
assert y.shape == (batch_size, n, action_size)
# check sample_n_without_squash
y = vae.sample_n_without_squash(x, n)
assert y.shape == (batch_size, n, action_size)
# TODO: test vae.compute_likelihood_loss(x, action)
# check layer connections
check_parameter_updates(vae, (x, action))
@pytest.mark.parametrize("feature_size", [100])
@pytest.mark.parametrize("action_size", [2])
@pytest.mark.parametrize("beta", [1e-2])
@pytest.mark.parametrize("batch_size", [32])
def test_discrete_imitator(feature_size, action_size, beta, batch_size):
encoder = DummyEncoder(feature_size)
imitator = DiscreteImitator(encoder, action_size, beta)
# check output shape
x = torch.rand(batch_size, feature_size)
y = imitator(x)
assert torch.allclose(y.exp().sum(dim=1), torch.ones(batch_size))
y, logits = imitator.compute_log_probs_with_logits(x)
assert torch.allclose(y, F.log_softmax(logits, dim=1))
action = torch.randint(low=0, high=action_size - 1, size=(batch_size,))
loss = imitator.compute_error(x, action)
penalty = (logits**2).mean()
assert torch.allclose(loss, F.nll_loss(y, action) + beta * penalty)
# check layer connections
check_parameter_updates(imitator, (x, action))
@pytest.mark.parametrize("feature_size", [100])
@pytest.mark.parametrize("action_size", [2])
@pytest.mark.parametrize("batch_size", [32])
def test_deterministic_regressor(feature_size, action_size, batch_size):
encoder = DummyEncoder(feature_size)
imitator = DeterministicRegressor(encoder, action_size)
x = torch.rand(batch_size, feature_size)
y = imitator(x)
assert y.shape == (batch_size, action_size)
action = torch.rand(batch_size, action_size)
loss = imitator.compute_error(x, action)
assert torch.allclose(F.mse_loss(y, action), loss)
# check layer connections
check_parameter_updates(imitator, (x, action))
@pytest.mark.parametrize("feature_size", [100])
@pytest.mark.parametrize("action_size", [2])
@pytest.mark.parametrize("batch_size", [32])
@pytest.mark.parametrize("n", [10])
def test_probablistic_regressor(feature_size, action_size, batch_size, n):
encoder = DummyEncoder(feature_size)
imitator = ProbablisticRegressor(
encoder, action_size, min_logstd=-20, max_logstd=2
)
x = torch.rand(batch_size, feature_size)
y = imitator(x)
assert y.shape == (batch_size, action_size)
action = torch.rand(batch_size, action_size)
loss = imitator.compute_error(x, action)
y = imitator.sample_n(x, n)
assert y.shape == (batch_size, n, action_size)
# check layer connections
check_parameter_updates(imitator, (x, action))
| [
"torch.rand",
"torch.nn.functional.log_softmax",
"torch.nn.functional.mse_loss",
"torch.ones",
"torch.randint",
"torch.nn.functional.nll_loss"
] | 1.6.0 | mcx/d3rlpy | 9867803a096b8a90e376443a0ffabb4765f38145 |
1.0 | import math
import torch
import torch.nn as nn
from deepke.model import BasicModule, Embedding
class DotAttention(nn.Module):
'''
    \text{Attention}(Q, K, V) = \operatorname{softmax}\left(\frac{Q K^{T}}{\sqrt{d_{k}}}\right) V
'''
def __init__(self, dropout=0.0):
super(DotAttention, self).__init__()
self.drop = nn.Dropout(dropout)
self.softmax = nn.Softmax(dim=-1)
def forward(self, Q, K, V, mask_out=None):
"""
:param Q: [batch, seq_len_q, feature_size]
:param K: [batch, seq_len_k, feature_size]
:param V: [batch, seq_len_k, feature_size]
:param mask_out: [batch, 1, seq_len] or [batch, seq_len_q, seq_len_k]
"""
feature_size = Q.size(-1)
scale = math.sqrt(feature_size)
output = torch.matmul(Q, K.transpose(1, 2)) / scale
if mask_out is not None:
output.masked_fill_(mask_out, -1e18)
output = self.softmax(output)
output = self.drop(output)
return torch.matmul(output, V)
class MultiHeadAttention(nn.Module):
"""
    :param feature_size: int, size of the input dimension; this is also the size of the output dimension.
    :param num_head: int, number of attention heads.
    :param dropout: float.
"""
def __init__(self, feature_size, num_head, dropout=0.2):
super(MultiHeadAttention, self).__init__()
self.feature_size = feature_size
self.num_head = num_head
self.q_in = nn.Linear(feature_size, feature_size * num_head)
self.k_in = nn.Linear(feature_size, feature_size * num_head)
self.v_in = nn.Linear(feature_size, feature_size * num_head)
self.attention = DotAttention(dropout=dropout)
self.out = nn.Linear(feature_size * num_head, feature_size)
def forward(self, Q, K, V, att_mask_out=None):
"""
:param Q: [batch, seq_len_q, feature_size]
:param K: [batch, seq_len_k, feature_size]
:param V: [batch, seq_len_k, feature_size]
:param seq_mask: [batch, seq_len]
"""
batch, sq, feature = Q.size()
sk = K.size(1)
n_head = self.num_head
# input linear
q = self.q_in(Q).view(batch, sq, n_head, feature)
k = self.k_in(K).view(batch, sk, n_head, feature)
v = self.v_in(V).view(batch, sk, n_head, feature)
# transpose q, k and v to do batch attention
# [batch, seq_len, num_head, feature] => [num_head*batch, seq_len, feature]
q = q.permute(2, 0, 1, 3).contiguous().view(-1, sq, feature)
k = k.permute(2, 0, 1, 3).contiguous().view(-1, sk, feature)
v = v.permute(2, 0, 1, 3).contiguous().view(-1, sk, feature)
if att_mask_out is not None:
att_mask_out = att_mask_out.repeat(n_head, 1, 1)
att = self.attention(q, k, v, att_mask_out).view(n_head, batch, sq, feature)
# concat all heads, do output linear
# [num_head, batch, seq_len, feature] => [batch, seq_len, num_head*feature]
att = att.permute(1, 2, 0, 3).contiguous().view(batch, sq, -1)
output = self.out(att)
return output
class Transformer(BasicModule):
def __init__(self, vocab_size, config):
super(Transformer, self).__init__()
self.model_name = 'Transformer'
self.vocab_size = vocab_size
self.word_dim = config.model.word_dim
self.pos_size = config.model.pos_size
self.pos_dim = config.model.pos_dim
self.hidden_dim = config.model.hidden_dim
self.dropout = config.model.dropout
self.layers = config.transformer.transformer_layers
self.out_dim = config.relation_type
self.embedding = Embedding(self.vocab_size, self.word_dim, self.pos_size, self.pos_dim)
self.feature_dim = self.word_dim + self.pos_dim * 2
self.att = MultiHeadAttention(self.feature_dim, num_head=4)
self.norm1 = nn.LayerNorm(self.feature_dim)
self.ffn = nn.Sequential(nn.Linear(self.feature_dim, self.hidden_dim), nn.ReLU(),
nn.Linear(self.hidden_dim, self.feature_dim), nn.Dropout(self.dropout))
self.norm2 = nn.LayerNorm(self.feature_dim)
self.fc = nn.Linear(self.feature_dim, self.out_dim)
def forward(self, input):
*x, mask = input
x = self.embedding(x)
att_mask_out = mask.eq(0).unsqueeze(1)
for i in range(self.layers):
attention = self.att(x, x, x, att_mask_out)
norm_att = self.norm1(attention + x)
x = self.ffn(norm_att)
x = self.norm2(x + norm_att)
x = x[:, 0]
out = self.fc(x)
return out
if __name__ == '__main__':
torch.manual_seed(1)
q = torch.randn(32, 50, 100)
k = torch.randn(32, 60, 100)
v = torch.randn(32, 60, 100)
mask = torch.randn(32, 60).unsqueeze(1).gt(0)
att1 = DotAttention()
out = att1(q, k, v, mask)
print(out.shape) # [32, 50, 100]
att2 = MultiHeadAttention(feature_size=100, num_head=8)
out = att2(q, k, v, mask)
print(out.shape) # [32, 50, 100]
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.LayerNorm",
"torch.nn.Softmax",
"torch.manual_seed",
"torch.nn.ReLU",
"torch.matmul",
"torch.randn"
] | 1.0 | napoler/deepke | 4d32527a22b7664600fe06fb5e24e1bedaaba97d |
1.3 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import numpy as np
import pytest
from sklearn.metrics import average_precision_score as sk_average_precision_score
from torch import tensor
from tests.classification.inputs import _input_binary_prob
from tests.classification.inputs import _input_multiclass_prob as _input_mcls_prob
from tests.classification.inputs import _input_multidim_multiclass_prob as _input_mdmc_prob
from tests.helpers import seed_all
from tests.helpers.testers import NUM_CLASSES, MetricTester
from torchmetrics.classification.average_precision import AveragePrecision
from torchmetrics.functional import average_precision
seed_all(42)
def _sk_average_precision_score(y_true, probas_pred, num_classes=1):
if num_classes == 1:
return sk_average_precision_score(y_true, probas_pred)
res = []
for i in range(num_classes):
y_true_temp = np.zeros_like(y_true)
y_true_temp[y_true == i] = 1
res.append(sk_average_precision_score(y_true_temp, probas_pred[:, i]))
return res
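# Note: for num_classes > 1 this computes a one-vs-rest average precision per class,
# scoring each class' probability column against a binarized copy of the targets.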
def _sk_avg_prec_binary_prob(preds, target, num_classes=1):
sk_preds = preds.view(-1).numpy()
sk_target = target.view(-1).numpy()
return _sk_average_precision_score(y_true=sk_target, probas_pred=sk_preds, num_classes=num_classes)
def _sk_avg_prec_multiclass_prob(preds, target, num_classes=1):
sk_preds = preds.reshape(-1, num_classes).numpy()
sk_target = target.view(-1).numpy()
return _sk_average_precision_score(y_true=sk_target, probas_pred=sk_preds, num_classes=num_classes)
def _sk_avg_prec_multidim_multiclass_prob(preds, target, num_classes=1):
sk_preds = preds.transpose(0, 1).reshape(num_classes, -1).transpose(0, 1).numpy()
sk_target = target.view(-1).numpy()
return _sk_average_precision_score(y_true=sk_target, probas_pred=sk_preds, num_classes=num_classes)
@pytest.mark.parametrize(
"preds, target, sk_metric, num_classes", [
(_input_binary_prob.preds, _input_binary_prob.target, _sk_avg_prec_binary_prob, 1),
(_input_mcls_prob.preds, _input_mcls_prob.target, _sk_avg_prec_multiclass_prob, NUM_CLASSES),
(_input_mdmc_prob.preds, _input_mdmc_prob.target, _sk_avg_prec_multidim_multiclass_prob, NUM_CLASSES),
]
)
class TestAveragePrecision(MetricTester):
@pytest.mark.parametrize("ddp", [True, False])
@pytest.mark.parametrize("dist_sync_on_step", [True, False])
def test_average_precision(self, preds, target, sk_metric, num_classes, ddp, dist_sync_on_step):
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=AveragePrecision,
sk_metric=partial(sk_metric, num_classes=num_classes),
dist_sync_on_step=dist_sync_on_step,
metric_args={"num_classes": num_classes}
)
def test_average_precision_functional(self, preds, target, sk_metric, num_classes):
self.run_functional_metric_test(
preds,
target,
metric_functional=average_precision,
sk_metric=partial(sk_metric, num_classes=num_classes),
metric_args={"num_classes": num_classes},
)
@pytest.mark.parametrize(
['scores', 'target', 'expected_score'],
[
        # Check that the average_precision_score of a constant predictor is the TPR.
        # Generate a dataset with 25% positives and a constant score:
        # the precision then equals the fraction of positives regardless of the
        # recall, as there is only one threshold.
pytest.param(tensor([1, 1, 1, 1]), tensor([0, 0, 0, 1]), .25),
        # With threshold 0.8: 1 TP, 2 TN and 1 FN
pytest.param(tensor([.6, .7, .8, 9]), tensor([1, 0, 0, 1]), .75),
]
)
def test_average_precision(scores, target, expected_score):
assert average_precision(scores, target) == expected_score
| [
"torch.tensor"
] | 1.3.1 | victorjoos/lightning-metrics | f06488faf79d4f4792cd392e964870d4898dde45 |
1.4 | import os
import warnings
from pathlib import Path
import torch
from torchaudio._internal import module_utils as _mod_utils # noqa: F401
_LIB_DIR = Path(__file__).parent / 'lib'
def _get_lib_path(lib: str):
suffix = 'pyd' if os.name == 'nt' else 'so'
path = _LIB_DIR / f'{lib}.{suffix}'
return path
def _load_lib(lib: str):
path = _get_lib_path(lib)
# In case `torchaudio` is deployed with `pex` format, this file does not exist.
# In this case, we expect that `libtorchaudio` is available somewhere
# in the search path of dynamic loading mechanism, and importing `_torchaudio`,
# which depends on `libtorchaudio` and dynamic loader will handle it for us.
if path.exists():
torch.ops.load_library(path)
torch.classes.load_library(path)
def _init_extension():
if not _mod_utils.is_module_available('torchaudio._torchaudio'):
warnings.warn('torchaudio C++ extension is not available.')
return
_load_lib('libtorchaudio')
# This import is for initializing the methods registered via PyBind11
# This has to happen after the base library is loaded
from torchaudio import _torchaudio # noqa
_init_extension()
| [
"torch.ops.load_library",
"torch.classes.load_library"
] | 1.4.0 | StatsGary/audio | 0a701058b432dd602bba3461866bfb3c3a352e04 |
1.0 | import torch
import torch.utils.model_zoo as model_zoo
import os
from collections import OrderedDict
def load_checkpoint(model, checkpoint_path, use_ema=False):
if checkpoint_path and os.path.isfile(checkpoint_path):
checkpoint = torch.load(checkpoint_path)
state_dict_key = ''
if isinstance(checkpoint, dict):
state_dict_key = 'state_dict'
if use_ema and 'state_dict_ema' in checkpoint:
state_dict_key = 'state_dict_ema'
if state_dict_key and state_dict_key in checkpoint:
new_state_dict = OrderedDict()
for k, v in checkpoint[state_dict_key].items():
# strip `module.` prefix
name = k[7:] if k.startswith('module') else k
new_state_dict[name] = v
model.load_state_dict(new_state_dict)
else:
model.load_state_dict(checkpoint)
print("=> Loaded {} from checkpoint '{}'".format(state_dict_key or 'weights', checkpoint_path))
else:
print("=> Error: No checkpoint found at '{}'".format(checkpoint_path))
raise FileNotFoundError()
def resume_checkpoint(model, checkpoint_path, start_epoch=None):
optimizer_state = None
if os.path.isfile(checkpoint_path):
checkpoint = torch.load(checkpoint_path)
if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
new_state_dict = OrderedDict()
for k, v in checkpoint['state_dict'].items():
name = k[7:] if k.startswith('module') else k
new_state_dict[name] = v
model.load_state_dict(new_state_dict)
if 'optimizer' in checkpoint:
optimizer_state = checkpoint['optimizer']
start_epoch = checkpoint['epoch'] if start_epoch is None else start_epoch
print("=> Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, checkpoint['epoch']))
else:
model.load_state_dict(checkpoint)
start_epoch = 0 if start_epoch is None else start_epoch
print("=> Loaded checkpoint '{}'".format(checkpoint_path))
return optimizer_state, start_epoch
else:
print("=> Error: No checkpoint found at '{}'".format(checkpoint_path))
raise FileNotFoundError()
def load_pretrained(model, default_cfg, num_classes=1000, in_chans=3, filter_fn=None):
if 'url' not in default_cfg or not default_cfg['url']:
print("Warning: pretrained model URL is invalid, using random initialization.")
return
state_dict = model_zoo.load_url(default_cfg['url'])
if in_chans == 1:
conv1_name = default_cfg['first_conv']
print('Converting first conv (%s) from 3 to 1 channel' % conv1_name)
conv1_weight = state_dict[conv1_name + '.weight']
state_dict[conv1_name + '.weight'] = conv1_weight.sum(dim=1, keepdim=True)
elif in_chans != 3:
assert False, "Invalid in_chans for pretrained weights"
strict = True
classifier_name = default_cfg['classifier']
if num_classes == 1000 and default_cfg['num_classes'] == 1001:
# special case for imagenet trained models with extra background class in pretrained weights
classifier_weight = state_dict[classifier_name + '.weight']
state_dict[classifier_name + '.weight'] = classifier_weight[1:]
classifier_bias = state_dict[classifier_name + '.bias']
state_dict[classifier_name + '.bias'] = classifier_bias[1:]
elif num_classes != default_cfg['num_classes']:
# completely discard fully connected for all other differences between pretrained and created model
del state_dict[classifier_name + '.weight']
del state_dict[classifier_name + '.bias']
strict = False
if filter_fn is not None:
state_dict = filter_fn(state_dict)
model.load_state_dict(state_dict, strict=strict)
| [
"torch.utils.model_zoo.load_url",
"torch.load"
] | 1.0 | bermanmaxim/pytorch-image-models | 1d7f2d93a68bdc3c5d8d634d869709f6cdd7cecd |
1.9 | import argparse
import os
from pathlib import Path
import torch
from torch.utils.data import DataLoader
import torchvision.transforms as T
from tqdm import tqdm
from torchvision.datasets import CIFAR10, CIFAR100, STL10, ImageNet, ImageFolder
import numpy as np
import pandas as pd
from models.neuralhash import NeuralHash
from utils.hashing import load_hash_matrix
from utils.transforms import Rotate, Translate, ChangeSaturation, ChangeHue, ChangeContrast, ChangeBrightness, \
JpegCompression, HorizontalFlipping, BlackBorder, CenterCrop, VerticalFlipping
def get_dataset(dataset_name: str, additional_transforms=None):
img_transform = get_transforms(additional_transforms=additional_transforms)
if dataset_name.lower() == 'stl10':
dataset = STL10(root='data', split='train', transform=img_transform, download=True)
elif dataset_name.lower() == 'cifar10':
dataset = CIFAR10(root='data', train=True, transform=img_transform, download=True)
elif dataset_name.lower() == 'cifar100':
dataset = CIFAR100(root='data', train=True, transform=img_transform, download=True)
elif dataset_name.lower() == 'imagenet_test':
dataset = ImageFolder(root='data/ILSVRC2012_test', transform=img_transform)
elif dataset_name.lower() == 'imagenet_train':
dataset = ImageNet(root='data/ILSVRC2012', split='train', transform=img_transform)
elif dataset_name.lower() == 'imagenet_val':
dataset = ImageNet(root='data/ILSVRC2012', split='val', transform=img_transform)
else:
raise RuntimeError(f'Dataset with name {dataset_name} was not found.')
return dataset
def get_transforms(additional_transforms=None):
transforms = [
T.Resize((360, 360)),
T.ToTensor()
]
if additional_transforms is not None and type(additional_transforms) == list:
transforms.extend(additional_transforms)
transforms.append(T.Lambda(lambda x: x * 2 - 1))
img_transform = T.Compose(transforms)
return img_transform
def get_translation_tuples(max_trans, trans_log_base, trans_steps):
translations = []
values = np.unique(
np.ceil(
np.logspace(0, np.log(max_trans) / np.log(trans_log_base), trans_steps, endpoint=True, base=trans_log_base)
).astype(int)
)
values = [0] + values.tolist()
for hor_trans in values:
for vert_trans in values:
translations.append((hor_trans, vert_trans))
return translations
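# Illustrative note: with the script's default arguments (max_trans=64,
# trans_log_base=2, trans_steps=7) the per-axis offsets are
# [0, 1, 2, 4, 8, 16, 32, 64], giving 8 * 8 = 64 (horizontal, vertical) pairs.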
def get_rotation_angles(max_rot_angle, rot_log_base, rot_steps):
    # create the list of clockwise and counterclockwise rotation angles
angles = np.unique(
np.ceil(
np.logspace(0, np.log(max_rot_angle) / np.log(rot_log_base), rot_steps, endpoint=True, base=rot_log_base)
).astype(int)
)
angles = np.flip(-angles).tolist() + [0] + angles.tolist()
return angles
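# Illustrative note: with the script's default arguments (max_rot_angle=64,
# rot_log_base=2, rot_steps=7) this returns the log-spaced angles
# [-64, -32, -16, -8, -4, -2, -1, 0, 1, 2, 4, 8, 16, 32, 64].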
def get_hashes(dataset, model, seed, device, batch_size=128, num_workers=8):
dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, shuffle=False)
binary_hashes = []
hex_hashes = []
with torch.no_grad():
for x, y in tqdm(dataloader, desc='Getting Neural Hashes', leave=False):
x = x.to(device)
hash = model(x).squeeze().unsqueeze(2)
hash = torch.matmul(seed.repeat(len(x), 1, 1), hash)
hash = torch.sign(hash).view(len(x), -1).cpu()
# convert the tensor from [-1, 1] to [0, 1]
hash = (hash > 0).type(torch.IntTensor)
hash_bin = [''.join(list(map(str, x.tolist()))) for x in hash]
hash_hex = ['{:0{}x}'.format(int(hash_bits, 2), len(hash_bits) // 4) for hash_bits in hash_bin]
binary_hashes.extend(hash_bin)
hex_hashes.extend(hash_hex)
return binary_hashes, hex_hashes
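# Worked example of the bit-string to hex conversion above (illustrative): for an
# 8-bit string hash_bits = '00101111', int(hash_bits, 2) == 47 and
# len(hash_bits) // 4 == 2, so '{:0{}x}'.format(47, 2) yields '2f'.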
def run_augmentation(dataset, model, seed, device, augmentation, augmentation_inputs, file_paths, batch_size=128,
num_workers=8):
for augm_input, file_path in tqdm(zip(augmentation_inputs, file_paths), desc=augmentation.__name__ if augmentation else 'Original', total=len(augmentation_inputs)):
if os.path.exists(file_path):
continue
        # create an empty placeholder file so that concurrent runs skip augmentations another process has already started
if not os.path.exists(file_path):
os.makedirs(os.path.dirname(file_path), exist_ok=True)
Path(file_path).touch(exist_ok=False)
if augmentation is not None:
new_transforms = get_transforms(additional_transforms=[augmentation(augm_input) if augm_input is not None else augmentation()])
dataset.transform = new_transforms
binary_hashes, hex_hashes = get_hashes(dataset, model, seed, device, batch_size=batch_size,
num_workers=num_workers)
hash_df = pd.DataFrame(columns=['image', 'hash_bin', 'hash_hex'])
hash_df = hash_df.assign(hash_bin=binary_hashes, hash_hex=hex_hashes)
if hasattr(dataset, 'imgs'):
hash_df = hash_df.assign(image=list(np.array(dataset.imgs)[:, 0]))
if not os.path.exists(os.path.dirname(file_path)):
os.makedirs(os.path.dirname(file_path), exist_ok=False)
hash_df.to_csv(file_path)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='imagenet_train',
choices=['stl10', 'cifar10', 'cifar100', 'imagenet_test', 'imagenet_train', 'imagenet_val'], type=str,
help='The dataset that is used')
parser.add_argument('--batch_size', default=128, type=int, help='The batch size used for inference')
parser.add_argument('--max_rot_angle', default=64, type=int,
help='The angle (in degrees) by which the image is rotated clockwise and counterclockwise')
parser.add_argument('--rot_log_base', default=2, type=int, help='The logarithm base')
parser.add_argument('--rot_steps', default=7, type=int, help='The number of rotations steps')
parser.add_argument('--max_trans', default=64, type=int,
help='The max translation in pixels by which the image is going to be translated')
parser.add_argument('--trans_log_base', default=2, type=int, help='The logarithm base')
parser.add_argument('--trans_steps', default=7, type=int,
help='The number of translation steps in vertical and horizontal direction, respectively')
parser.add_argument('--device', default='cuda', type=str, help='The device used for inference')
parser.add_argument('--num_workers', default=8, type=int,
help='The number of workers that is used for loading the data')
parser.add_argument('--output_dir', default='logs', type=str,
help='The output directory where the results are going to be saved as CSV files')
args = parser.parse_args()
device = torch.device(args.device)
model = NeuralHash()
model.load_state_dict(torch.load('./models/model.pth'))
model = model.to(device)
seed = torch.tensor(load_hash_matrix())
seed = seed.to(device)
output_dir = os.path.join(args.output_dir, f'{args.dataset}')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
dataset = get_dataset(args.dataset)
# get the rotation angles and the translation tuples
angles = get_rotation_angles(args.max_rot_angle, args.rot_log_base, args.rot_steps)
translations = get_translation_tuples(args.max_trans, args.trans_log_base, args.trans_steps)
hue_values = list(range(-180, 180, 30))
saturation_values = list(np.linspace(0, 2, 9, endpoint=True))
brightness_values = list(np.linspace(0, 2, 9, endpoint=True))
contrast_values = list(np.linspace(0, 2, 9, endpoint=True))
compression_values = [100] + list(
(100 - np.ceil(np.logspace(0, np.log(100) / np.log(1.5), 10, endpoint=True, base=1.5))).clip(0, 100)
)
crop_values = list(
filter(
lambda x: x != 359,
[360] + list(360 - np.append(np.logspace(0, 7, 8, base=2, endpoint=True, dtype=int), [180]))
)
)
downsizing_values = list(
filter(
lambda x: x != 359,
[360] + list(360 - np.append(np.logspace(0, 7, 8, base=2, endpoint=True, dtype=int), [180]))
)
)
iterations = len(angles) + len(translations) + len(hue_values) + len(saturation_values) + \
len(brightness_values) + len(contrast_values) + len(compression_values) + len(crop_values) + len(downsizing_values) + 1
# get the initial hashes
run_augmentation(
dataset,
model,
seed,
device,
None,
[None],
[os.path.join(output_dir, f'{args.dataset}_original.csv')],
batch_size=args.batch_size,
num_workers=args.num_workers
)
# test the robustness against rotations
run_augmentation(
dataset,
model,
seed,
device,
Rotate,
angles,
[os.path.join(output_dir, 'rotation', f'{args.dataset}_rotation_{angle}.csv') for angle in angles],
batch_size=args.batch_size,
num_workers=args.num_workers
)
# test the robustness against translations
run_augmentation(
dataset,
model,
seed,
device,
Translate,
translations,
[os.path.join(output_dir, 'translation', f'{args.dataset}_translation_{translation[0]}_{translation[1]}.csv') for translation in translations],
batch_size=args.batch_size,
num_workers=args.num_workers
)
# test the robustness against hue changes
run_augmentation(
dataset,
model,
seed,
device,
ChangeHue,
hue_values,
[os.path.join(output_dir, 'hue', f'{args.dataset}_hue_{hue}.csv') for hue in hue_values],
batch_size=args.batch_size,
num_workers=args.num_workers
)
# test the robustness against saturation changes
run_augmentation(
dataset,
model,
seed,
device,
ChangeSaturation,
saturation_values,
[os.path.join(output_dir, 'saturation', f'{args.dataset}_saturation_{saturation}.csv') for saturation in saturation_values],
batch_size=args.batch_size,
num_workers=args.num_workers
)
# test the robustness against brightness changes
run_augmentation(
dataset,
model,
seed,
device,
ChangeBrightness,
brightness_values,
[os.path.join(output_dir, 'brightness', f'{args.dataset}_brightness_{brightness}.csv') for brightness in brightness_values],
batch_size=args.batch_size,
num_workers=args.num_workers
)
# test the robustness against contrast changes
run_augmentation(
dataset,
model,
seed,
device,
ChangeContrast,
contrast_values,
[os.path.join(output_dir, 'contrast', f'{args.dataset}_contrast_{contrast}.csv') for contrast in contrast_values],
batch_size=args.batch_size,
num_workers=args.num_workers
)
# test the robustness against compression
run_augmentation(
dataset,
model,
seed,
device,
JpegCompression,
compression_values,
[os.path.join(output_dir, 'compression', f'{args.dataset}_compression_{compression}.csv') for compression in compression_values],
batch_size=args.batch_size,
num_workers=args.num_workers
)
run_augmentation(
dataset,
model,
seed,
device,
CenterCrop,
crop_values,
[os.path.join(output_dir, 'crop', f'{args.dataset}_crop_{crop}.csv') for crop in crop_values],
batch_size=args.batch_size,
num_workers=args.num_workers
)
run_augmentation(
dataset,
model,
seed,
device,
HorizontalFlipping,
[None],
[os.path.join(output_dir, 'hflip', f'{args.dataset}_hflip.csv')],
batch_size=args.batch_size,
num_workers=args.num_workers
)
run_augmentation(
dataset,
model,
seed,
device,
VerticalFlipping,
[None],
[os.path.join(output_dir, 'vflip', f'{args.dataset}_vflip.csv')],
batch_size=args.batch_size,
num_workers=args.num_workers
)
run_augmentation(
dataset,
model,
seed,
device,
BlackBorder,
downsizing_values,
[os.path.join(output_dir, 'downsizing', f'{args.dataset}_downsizing_{size}.csv') for size in downsizing_values],
batch_size=args.batch_size,
num_workers=args.num_workers
)
if __name__ == '__main__':
main()
| [
"torch.device",
"torch.no_grad",
"torch.sign",
"torch.utils.data.DataLoader",
"torch.load"
] | 1.9.0 | ml-research/Learning-to-Break-Deep-Perceptual-Hashing | 12148e8ecd47faa1f816f52f56662c47cd240cc1 |
0.4 | import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.optim import lr_scheduler
###############################################################################
# Helper Functions
###############################################################################
def get_norm_layer(norm_type='instance'):
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False)
elif norm_type == 'none':
norm_layer = None
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
def get_scheduler(optimizer, opt):
if opt.lr_policy == 'lambda':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + 1 + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
elif opt.lr_policy == 'plateau':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
else:
return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy)
return scheduler
def init_weights(net, init_type='xavier', gain=0.02):
def init_func(m):
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1:
init.normal_(m.weight.data, 1.0, gain)
init.constant_(m.bias.data, 0.0)
print('initialize network with %s' % init_type)
net.apply(init_func)
def init_net(net, init_type='xavier', gpu_ids=[]):
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
net.to(gpu_ids[0])
net = torch.nn.DataParallel(net, gpu_ids)
init_weights(net, init_type)
return net
def define_G(input_nc, output_nc, ngf, which_model_netG, norm='batch', use_dropout=False, init_type='xavier', gpu_ids=[], use_tanh=True, classification=True):
netG = None
norm_layer = get_norm_layer(norm_type=norm)
if which_model_netG == 'resnet_9blocks':
netG = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)
elif which_model_netG == 'resnet_6blocks':
netG = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6)
elif which_model_netG == 'unet_128':
netG = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif which_model_netG == 'unet_256':
netG = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif which_model_netG == 'siggraph':
netG = SIGGRAPHGenerator(input_nc, output_nc, norm_layer=norm_layer, use_tanh=use_tanh, classification=classification)
else:
raise NotImplementedError('Generator model name [%s] is not recognized' % which_model_netG)
return init_net(netG, init_type, gpu_ids)
def define_D(input_nc, ndf, which_model_netD,
n_layers_D=3, norm='batch', use_sigmoid=False, init_type='xavier', gpu_ids=[]):
netD = None
norm_layer = get_norm_layer(norm_type=norm)
if which_model_netD == 'basic':
netD = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, use_sigmoid=use_sigmoid)
elif which_model_netD == 'n_layers':
netD = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, use_sigmoid=use_sigmoid)
elif which_model_netD == 'pixel':
netD = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer, use_sigmoid=use_sigmoid)
else:
raise NotImplementedError('Discriminator model name [%s] is not recognized' %
which_model_netD)
return init_net(netD, init_type, gpu_ids)
##############################################################################
# Classes
##############################################################################
class HuberLoss(nn.Module):
def __init__(self, delta=.01):
super(HuberLoss, self).__init__()
self.delta = delta
def __call__(self, in0, in1):
mask = torch.zeros_like(in0)
mann = torch.abs(in0 - in1)
eucl = .5 * (mann**2)
mask[...] = mann < self.delta
# loss = eucl*mask + self.delta*(mann-.5*self.delta)*(1-mask)
loss = eucl * mask / self.delta + (mann - .5 * self.delta) * (1 - mask)
return torch.sum(loss, dim=1, keepdim=True)
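# Note: per element the loss above is 0.5 * x**2 / delta where |x| < delta and
# |x| - 0.5 * delta otherwise (with x = in0 - in1), summed over the channel dimension.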
class L1Loss(nn.Module):
def __init__(self):
super(L1Loss, self).__init__()
def __call__(self, in0, in1):
return torch.sum(torch.abs(in0 - in1), dim=1, keepdim=True)
class L2Loss(nn.Module):
def __init__(self):
super(L2Loss, self).__init__()
def __call__(self, in0, in1):
return torch.sum((in0 - in1)**2, dim=1, keepdim=True)
# Defines the GAN loss which uses either LSGAN or the regular GAN.
# When LSGAN is used, it is basically the same as MSELoss,
# but it abstracts away the need to create the target label tensor
# that has the same size as the input
class GANLoss(nn.Module):
def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0):
super(GANLoss, self).__init__()
self.register_buffer('real_label', torch.tensor(target_real_label))
self.register_buffer('fake_label', torch.tensor(target_fake_label))
if use_lsgan:
self.loss = nn.MSELoss()
else:
self.loss = nn.BCELoss()
def get_target_tensor(self, input, target_is_real):
if target_is_real:
target_tensor = self.real_label
else:
target_tensor = self.fake_label
return target_tensor.expand_as(input)
def __call__(self, input, target_is_real):
target_tensor = self.get_target_tensor(input, target_is_real)
return self.loss(input, target_tensor)
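# Minimal usage sketch (illustrative, assuming a discriminator `netD` and image
# batches `real_images` / `fake_images` defined elsewhere):
#   criterion = GANLoss(use_lsgan=True)
#   loss_d_real = criterion(netD(real_images), True)            # target: all 1.0
#   loss_d_fake = criterion(netD(fake_images.detach()), False)  # target: all 0.0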
class SIGGRAPHGenerator(nn.Module):
def __init__(self, input_nc, output_nc, norm_layer=nn.BatchNorm2d, use_tanh=True, classification=True):
super(SIGGRAPHGenerator, self).__init__()
self.input_nc = input_nc
self.output_nc = output_nc
self.classification = classification
use_bias = True
# Conv1
# model1=[nn.ReflectionPad2d(1),]
model1 = [nn.Conv2d(input_nc, 64, kernel_size=3, stride=1, padding=1, bias=use_bias), ]
# model1+=[norm_layer(64),]
model1 += [nn.ReLU(True), ]
# model1+=[nn.ReflectionPad2d(1),]
model1 += [nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=use_bias), ]
model1 += [nn.ReLU(True), ]
model1 += [norm_layer(64), ]
# add a subsampling operation
# Conv2
# model2=[nn.ReflectionPad2d(1),]
model2 = [nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=use_bias), ]
# model2+=[norm_layer(128),]
model2 += [nn.ReLU(True), ]
# model2+=[nn.ReflectionPad2d(1),]
model2 += [nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=use_bias), ]
model2 += [nn.ReLU(True), ]
model2 += [norm_layer(128), ]
# add a subsampling layer operation
# Conv3
# model3=[nn.ReflectionPad2d(1),]
model3 = [nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1, bias=use_bias), ]
# model3+=[norm_layer(256),]
model3 += [nn.ReLU(True), ]
# model3+=[nn.ReflectionPad2d(1),]
model3 += [nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=use_bias), ]
# model3+=[norm_layer(256),]
model3 += [nn.ReLU(True), ]
# model3+=[nn.ReflectionPad2d(1),]
model3 += [nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=use_bias), ]
model3 += [nn.ReLU(True), ]
model3 += [norm_layer(256), ]
# add a subsampling layer operation
# Conv4
# model47=[nn.ReflectionPad2d(1),]
model4 = [nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1, bias=use_bias), ]
# model4+=[norm_layer(512),]
model4 += [nn.ReLU(True), ]
# model4+=[nn.ReflectionPad2d(1),]
model4 += [nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=use_bias), ]
# model4+=[norm_layer(512),]
model4 += [nn.ReLU(True), ]
# model4+=[nn.ReflectionPad2d(1),]
model4 += [nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=use_bias), ]
model4 += [nn.ReLU(True), ]
model4 += [norm_layer(512), ]
# Conv5
# model47+=[nn.ReflectionPad2d(2),]
model5 = [nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=use_bias), ]
# model5+=[norm_layer(512),]
model5 += [nn.ReLU(True), ]
# model5+=[nn.ReflectionPad2d(2),]
model5 += [nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=use_bias), ]
# model5+=[norm_layer(512),]
model5 += [nn.ReLU(True), ]
# model5+=[nn.ReflectionPad2d(2),]
model5 += [nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=use_bias), ]
model5 += [nn.ReLU(True), ]
model5 += [norm_layer(512), ]
# Conv6
# model6+=[nn.ReflectionPad2d(2),]
model6 = [nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=use_bias), ]
# model6+=[norm_layer(512),]
model6 += [nn.ReLU(True), ]
# model6+=[nn.ReflectionPad2d(2),]
model6 += [nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=use_bias), ]
# model6+=[norm_layer(512),]
model6 += [nn.ReLU(True), ]
# model6+=[nn.ReflectionPad2d(2),]
model6 += [nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=use_bias), ]
model6 += [nn.ReLU(True), ]
model6 += [norm_layer(512), ]
# Conv7
# model47+=[nn.ReflectionPad2d(1),]
model7 = [nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=use_bias), ]
# model7+=[norm_layer(512),]
model7 += [nn.ReLU(True), ]
# model7+=[nn.ReflectionPad2d(1),]
model7 += [nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=use_bias), ]
# model7+=[norm_layer(512),]
model7 += [nn.ReLU(True), ]
# model7+=[nn.ReflectionPad2d(1),]
model7 += [nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=use_bias), ]
model7 += [nn.ReLU(True), ]
model7 += [norm_layer(512), ]
# Conv7
model8up = [nn.ConvTranspose2d(512, 256, kernel_size=4, stride=2, padding=1, bias=use_bias)]
# model3short8=[nn.ReflectionPad2d(1),]
model3short8 = [nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=use_bias), ]
# model47+=[norm_layer(256),]
model8 = [nn.ReLU(True), ]
# model8+=[nn.ReflectionPad2d(1),]
model8 += [nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=use_bias), ]
# model8+=[norm_layer(256),]
model8 += [nn.ReLU(True), ]
# model8+=[nn.ReflectionPad2d(1),]
model8 += [nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=use_bias), ]
model8 += [nn.ReLU(True), ]
model8 += [norm_layer(256), ]
# Conv9
model9up = [nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1, bias=use_bias), ]
# model2short9=[nn.ReflectionPad2d(1),]
model2short9 = [nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=use_bias), ]
# add the two feature maps above
# model9=[norm_layer(128),]
model9 = [nn.ReLU(True), ]
# model9+=[nn.ReflectionPad2d(1),]
model9 += [nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=use_bias), ]
model9 += [nn.ReLU(True), ]
model9 += [norm_layer(128), ]
# Conv10
model10up = [nn.ConvTranspose2d(128, 128, kernel_size=4, stride=2, padding=1, bias=use_bias), ]
# model1short10=[nn.ReflectionPad2d(1),]
model1short10 = [nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=use_bias), ]
# add the two feature maps above
# model10=[norm_layer(128),]
model10 = [nn.ReLU(True), ]
# model10+=[nn.ReflectionPad2d(1),]
model10 += [nn.Conv2d(128, 128, kernel_size=3, dilation=1, stride=1, padding=1, bias=use_bias), ]
model10 += [nn.LeakyReLU(negative_slope=.2), ]
# classification output
model_class = [nn.Conv2d(256, 529, kernel_size=1, padding=0, dilation=1, stride=1, bias=use_bias), ]
# regression output
model_out = [nn.Conv2d(128, 2, kernel_size=1, padding=0, dilation=1, stride=1, bias=use_bias), ]
if(use_tanh):
model_out += [nn.Tanh()]
self.model1 = nn.Sequential(*model1)
self.model2 = nn.Sequential(*model2)
self.model3 = nn.Sequential(*model3)
self.model4 = nn.Sequential(*model4)
self.model5 = nn.Sequential(*model5)
self.model6 = nn.Sequential(*model6)
self.model7 = nn.Sequential(*model7)
self.model8up = nn.Sequential(*model8up)
self.model8 = nn.Sequential(*model8)
self.model9up = nn.Sequential(*model9up)
self.model9 = nn.Sequential(*model9)
self.model10up = nn.Sequential(*model10up)
self.model10 = nn.Sequential(*model10)
self.model3short8 = nn.Sequential(*model3short8)
self.model2short9 = nn.Sequential(*model2short9)
self.model1short10 = nn.Sequential(*model1short10)
self.model_class = nn.Sequential(*model_class)
self.model_out = nn.Sequential(*model_out)
self.upsample4 = nn.Sequential(*[nn.Upsample(scale_factor=4, mode='nearest'), ])
self.softmax = nn.Sequential(*[nn.Softmax(dim=1), ])
def forward(self, input_A, input_B, mask_B):
conv1_2 = self.model1(torch.cat((input_A, input_B, mask_B), dim=1))
conv2_2 = self.model2(conv1_2[:, :, ::2, ::2])
conv3_3 = self.model3(conv2_2[:, :, ::2, ::2])
conv4_3 = self.model4(conv3_3[:, :, ::2, ::2])
conv5_3 = self.model5(conv4_3)
conv6_3 = self.model6(conv5_3)
conv7_3 = self.model7(conv6_3)
conv8_up = self.model8up(conv7_3) + self.model3short8(conv3_3)
conv8_3 = self.model8(conv8_up)
if(self.classification):
out_class = self.model_class(conv8_3)
conv9_up = self.model9up(conv8_3.detach()) + self.model2short9(conv2_2.detach())
conv9_3 = self.model9(conv9_up)
conv10_up = self.model10up(conv9_3) + self.model1short10(conv1_2.detach())
conv10_2 = self.model10(conv10_up)
out_reg = self.model_out(conv10_2)
else:
out_class = self.model_class(conv8_3.detach())
conv9_up = self.model9up(conv8_3) + self.model2short9(conv2_2)
conv9_3 = self.model9(conv9_up)
conv10_up = self.model10up(conv9_3) + self.model1short10(conv1_2)
conv10_2 = self.model10(conv10_up)
out_reg = self.model_out(conv10_2)
return (out_class, out_reg)
# Defines the generator that consists of Resnet blocks between a few
# downsampling/upsampling operations.
# Code and idea originally from Justin Johnson's architecture.
# https://github.com/jcjohnson/fast-neural-style/
class ResnetGenerator(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):
assert(n_blocks >= 0)
super(ResnetGenerator, self).__init__()
self.input_nc = input_nc
self.output_nc = output_nc
self.ngf = ngf
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0,
bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling):
mult = 2**i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,
stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
mult = 2**n_downsampling
for i in range(n_blocks):
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling):
mult = 2**(n_downsampling - i)
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
return self.model(input)
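# Hypothetical usage sketch for ResnetGenerator (shapes and channel counts are illustrative):
#   netG = ResnetGenerator(input_nc=3, output_nc=3, ngf=64, n_blocks=9)
#   fake = netG(torch.randn(1, 3, 256, 256))  # -> (1, 3, 256, 256), Tanh-bounded
# The input is downsampled twice with stride-2 convs, run through the residual blocks at 1/4
# resolution, and upsampled back, so inputs whose sides are multiples of 4 round-trip to the
# same spatial size.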
# Define a resnet block
class ResnetBlock(nn.Module):
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
norm_layer(dim),
nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
out = x + self.conv_block(x)
return out
# Defines the Unet generator.
# |num_downs|: number of downsamplings in UNet. For example,
# if |num_downs| == 7, image of size 128x128 will become of size 1x1
# at the bottleneck
class UnetGenerator(nn.Module):
def __init__(self, input_nc, output_nc, num_downs, ngf=64,
norm_layer=nn.BatchNorm2d, use_dropout=False):
super(UnetGenerator, self).__init__()
# construct unet structure
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True)
for i in range(num_downs - 5):
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer)
self.model = unet_block
def forward(self, input_A, input_B, mask_B):
# embed()
return self.model(torch.cat((input_A, input_B, mask_B), dim=1))
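# Hypothetical usage sketch for UnetGenerator (channel counts are illustrative assumptions):
#   netG = UnetGenerator(input_nc=4, output_nc=2, num_downs=8)
#   out = netG(torch.randn(1, 1, 256, 256),    # input_A
#              torch.randn(1, 2, 256, 256),    # input_B
#              torch.zeros(1, 1, 256, 256))    # mask_B -> out: (1, 2, 256, 256)
# input_nc must equal the summed channel count of the three concatenated inputs, and the
# spatial size must be divisible by 2**num_downs (here 256 == 2**8) for the skip connections
# to line up.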
# Defines the submodule with skip connection.
# X -------------------identity---------------------- X
# |-- downsampling -- |submodule| -- upsampling --|
class UnetSkipConnectionBlock(nn.Module):
def __init__(self, outer_nc, inner_nc, input_nc=None,
submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
super(UnetSkipConnectionBlock, self).__init__()
self.outermost = outermost
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
if input_nc is None:
input_nc = outer_nc
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(inner_nc)
uprelu = nn.ReLU(True)
upnorm = norm_layer(outer_nc)
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
elif innermost:
upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv]
up = [uprelu, upconv, upnorm]
model = down + up
else:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv, downnorm]
up = [uprelu, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost:
return self.model(x)
else:
return torch.cat([x, self.model(x)], 1)
# Defines the PatchGAN discriminator with the specified arguments.
class NLayerDiscriminator(nn.Module):
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False):
super(NLayerDiscriminator, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
kw = 4
padw = 1
sequence = [
nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
nn.LeakyReLU(0.2, True)
]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers):
nf_mult_prev = nf_mult
nf_mult = min(2**n, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2**n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
if use_sigmoid:
sequence += [nn.Sigmoid()]
self.model = nn.Sequential(*sequence)
def forward(self, input):
return self.model(input)
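# Hypothetical usage sketch for NLayerDiscriminator (shapes are illustrative):
#   netD = NLayerDiscriminator(input_nc=3, ndf=64, n_layers=3)
#   patch_logits = netD(torch.randn(1, 3, 256, 256))  # -> (1, 1, 30, 30)
# Each output logit scores one overlapping input patch (the usual 70x70 PatchGAN receptive
# field for the default n_layers=3).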
class PixelDiscriminator(nn.Module):
def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d, use_sigmoid=False):
super(PixelDiscriminator, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
self.net = [
nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
norm_layer(ndf * 2),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]
if use_sigmoid:
self.net.append(nn.Sigmoid())
self.net = nn.Sequential(*self.net)
def forward(self, input):
return self.net(input)
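# PixelDiscriminator is the 1x1 PatchGAN variant: every conv has kernel_size=1, so each output
# logit depends on a single input pixel. Hypothetical sketch:
#   netD = PixelDiscriminator(input_nc=3)
#   logits = netD(torch.randn(1, 3, 256, 256))  # -> (1, 1, 256, 256)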
| [
"torch.cat",
"torch.optim.lr_scheduler.StepLR",
"torch.nn.LeakyReLU",
"torch.nn.init.kaiming_normal_",
"torch.cuda.is_available",
"torch.nn.DataParallel",
"torch.sum",
"torch.nn.Softmax",
"torch.nn.init.constant_",
"torch.nn.ConvTranspose2d",
"torch.abs",
"torch.nn.init.normal_",
"torch.tensor",
"torch.nn.ReflectionPad2d",
"torch.nn.BCELoss",
"torch.zeros_like",
"torch.nn.init.orthogonal_",
"torch.nn.init.xavier_normal_",
"torch.nn.ReplicationPad2d",
"torch.nn.Sequential",
"torch.nn.Tanh",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.Dropout",
"torch.nn.MSELoss",
"torch.nn.Sigmoid",
"torch.nn.Upsample",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.optim.lr_scheduler.LambdaLR"
] | 0.4.0 | linlih/colorization-pytorch | f3fd558d4b99d253988ea7ac8389f6842daa4315 |
1.4 | #!/usr/bin/env python
# Copyright 2018 Division of Medical Image Computing, German Cancer Research Center (DKFZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""execution script."""
import code
import argparse
import os, warnings
import time
import pandas as pd
import pickle
import torch
import utils.exp_utils as utils
from evaluator import Evaluator
from predictor import Predictor
from plotting import plot_batch_prediction
for msg in ["Attempting to set identical bottom==top results",
"This figure includes Axes that are not compatible with tight_layout",
"Data has no positive values, and therefore cannot be log-scaled.",
".*invalid value encountered in double_scalars.*",
".*Mean of empty slice.*"]:
warnings.filterwarnings("ignore", msg)
def train(logger):
"""
perform the training routine for a given fold. saves plots and selected parameters to the experiment dir
specified in the configs.
"""
logger.info('performing training in {}D over fold {} on experiment {} with model {}'.format(
cf.dim, cf.fold, cf.exp_dir, cf.model))
net = model.net(cf, logger).cuda()
if hasattr(cf, "optimizer") and cf.optimizer.lower() == "adam":
logger.info("Using Adam optimizer.")
optimizer = torch.optim.Adam(utils.parse_params_for_optim(net, weight_decay=cf.weight_decay,
exclude_from_wd=cf.exclude_from_wd),
lr=cf.learning_rate[0])
else:
logger.info("Using AdamW optimizer.")
optimizer = torch.optim.AdamW(utils.parse_params_for_optim(net, weight_decay=cf.weight_decay,
exclude_from_wd=cf.exclude_from_wd),
lr=cf.learning_rate[0])
if cf.dynamic_lr_scheduling:
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode=cf.scheduling_mode, factor=cf.lr_decay_factor,
patience=cf.scheduling_patience)
model_selector = utils.ModelSelector(cf, logger)
train_evaluator = Evaluator(cf, logger, mode='train')
val_evaluator = Evaluator(cf, logger, mode=cf.val_mode)
starting_epoch = 1
# prepare monitoring
monitor_metrics = utils.prepare_monitoring(cf)
if cf.resume:
checkpoint_path = os.path.join(cf.fold_dir, "last_checkpoint")
starting_epoch, net, optimizer, monitor_metrics = \
utils.load_checkpoint(checkpoint_path, net, optimizer)
logger.info('resumed from checkpoint {} to epoch {}'.format(checkpoint_path, starting_epoch))
####### Use this to create hdf5
logger.info('loading dataset and initializing batch generators...')
print ("Start data loading...",time.time())
batch_gen = data_loader.get_train_generators(cf, logger)
print ("Finished batch gen data loading...",time.time())
####### Writing out train data to file
#train_data = dict()
#print ('Write training data to json')
#for bix in range(cf.num_train_batches):
# batch = next(batch_gen['train'])
# train_data.update(batch)
#with open('train_data.json', 'w') as outfile:
# json.dump(train_data, outfile)
#####################################
for epoch in range(starting_epoch, cf.num_epochs + 1):
logger.info('starting training epoch {}'.format(epoch))
start_time = time.time()
net.train()
train_results_list = []
for bix in range(cf.num_train_batches):
######### Insert call to grab right training data fold from hdf5
print ("Grab next batch from batch gen data loader ...",time.time())
##Stalled
batch = next(batch_gen['train']) ######## Instead of this line, grab a batch from training data fold
tic_fw = time.time()
print ("Start forward pass...",time.time())
results_dict = net.train_forward(batch)
tic_bw = time.time()
optimizer.zero_grad()
print ("Start backward pass..",time.time())
results_dict['torch_loss'].backward()
print ("Start optimizing...",time.time())
optimizer.step()
print('\rtr. batch {0}/{1} (ep. {2}) fw {3:.2f}s / bw {4:.2f} s / total {5:.2f} s || '.format(
bix + 1, cf.num_train_batches, epoch, tic_bw - tic_fw, time.time() - tic_bw,
time.time() - tic_fw) + results_dict['logger_string'], flush=True, end="")
train_results_list.append(({k:v for k,v in results_dict.items() if k != "seg_preds"}, batch["pid"]))
print()
_, monitor_metrics['train'] = train_evaluator.evaluate_predictions(train_results_list, monitor_metrics['train'])
logger.info('generating training example plot.')
utils.split_off_process(plot_batch_prediction, batch, results_dict, cf, outfile=os.path.join(
cf.plot_dir, 'pred_example_{}_train.png'.format(cf.fold)))
train_time = time.time() - start_time
logger.info('starting validation in mode {}.'.format(cf.val_mode))
with torch.no_grad():
net.eval()
if cf.do_validation:
val_results_list = []
val_predictor = Predictor(cf, net, logger, mode='val')
for _ in range(batch_gen['n_val']):
########## Insert call to grab right validation data fold from hdf5
batch = next(batch_gen[cf.val_mode])
if cf.val_mode == 'val_patient':
results_dict = val_predictor.predict_patient(batch)
elif cf.val_mode == 'val_sampling':
results_dict = net.train_forward(batch, is_validation=True)
#val_results_list.append([results_dict['boxes'], batch['pid']])
val_results_list.append(({k:v for k,v in results_dict.items() if k != "seg_preds"}, batch["pid"]))
_, monitor_metrics['val'] = val_evaluator.evaluate_predictions(val_results_list, monitor_metrics['val'])
model_selector.run_model_selection(net, optimizer, monitor_metrics, epoch)
# update monitoring and prediction plots
monitor_metrics.update({"lr":
{str(g): group['lr'] for (g, group) in enumerate(optimizer.param_groups)}})
logger.metrics2tboard(monitor_metrics, global_step=epoch)
epoch_time = time.time() - start_time
logger.info('trained epoch {}: took {} ({} train / {} val)'.format(
epoch, utils.get_formatted_duration(epoch_time, "ms"), utils.get_formatted_duration(train_time, "ms"),
utils.get_formatted_duration(epoch_time-train_time, "ms")))
########### Insert call to grab right validation data fold from hdf5
batch = next(batch_gen['val_sampling'])
results_dict = net.train_forward(batch, is_validation=True)
logger.info('generating validation-sampling example plot.')
utils.split_off_process(plot_batch_prediction, batch, results_dict, cf, outfile=os.path.join(
cf.plot_dir, 'pred_example_{}_val.png'.format(cf.fold)))
# -------------- scheduling -----------------
if cf.dynamic_lr_scheduling:
scheduler.step(monitor_metrics["val"][cf.scheduling_criterion][-1])
else:
for param_group in optimizer.param_groups:
param_group['lr'] = cf.learning_rate[epoch-1]
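# Note on the static schedule above: with cf.dynamic_lr_scheduling disabled, the learning rate is
# looked up per epoch as cf.learning_rate[epoch - 1], so cf.learning_rate is presumably a list with
# at least cf.num_epochs entries (assumption inferred from the indexing, not stated in the configs).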
def test(logger):
"""
perform testing for a given fold (or hold out set). save stats in evaluator.
"""
logger.info('starting testing model of fold {} in exp {}'.format(cf.fold, cf.exp_dir))
net = model.net(cf, logger).cuda()
test_predictor = Predictor(cf, net, logger, mode='test')
test_evaluator = Evaluator(cf, logger, mode='test')
################ Insert call to grab right test data (fold?) from hdf5
batch_gen = data_loader.get_test_generator(cf, logger)
####code.interact(local=locals())
test_results_list = test_predictor.predict_test_set(batch_gen, return_results=True)
test_evaluator.evaluate_predictions(test_results_list)
test_evaluator.score_test_df()
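# Hypothetical invocation sketch (paths are placeholders, not taken from the original file):
#   python exec.py --mode train_test --exp_source experiments/toy_exp \
#       --exp_dir /path/to/experiment/directory -f 0 1 --cuda_device 0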
if __name__ == '__main__':
stime = time.time()
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--mode', type=str, default='train_test',
help='one out of: train / test / train_test / analysis / create_exp')
parser.add_argument('-f','--folds', nargs='+', type=int, default=None,
help='None runs over all folds in CV. otherwise specify list of folds.')
parser.add_argument('--exp_dir', type=str, default='/path/to/experiment/directory',
help='path to experiment dir. will be created if non existent.')
parser.add_argument('--server_env', default=False, action='store_true',
help='change IO settings to deploy models on a cluster.')
parser.add_argument('--data_dest', type=str, default=None, help="path to final data folder if different from config.")
parser.add_argument('--use_stored_settings', default=False, action='store_true',
help='load configs from existing exp_dir instead of source dir. always done for testing, '
'but can be set to true to do the same for training. useful in job scheduler environment, '
'where source code might change before the job actually runs.')
parser.add_argument('--resume', action="store_true", default=False,
help='if given, resume from checkpoint(s) of the specified folds.')
parser.add_argument('--exp_source', type=str, default='experiments/toy_exp',
help='specifies, from which source experiment to load configs and data_loader.')
parser.add_argument('--no_benchmark', action='store_true', help="Do not use cudnn.benchmark.")
parser.add_argument('--cuda_device', type=int, default=0, help="Index of CUDA device to use.")
parser.add_argument('-d', '--dev', default=False, action='store_true', help="development mode: shorten everything")
args = parser.parse_args()
folds = args.folds
torch.backends.cudnn.benchmark = not args.no_benchmark
########### Creating hdf5
    #if args.mode == 'create_hdf5':
# if folds is None:
# folds = range(cf.n_cv_splits)
# for fold in folds:
# create_hdf_foldwise_with_batch_generator_for_train/val/test
if args.mode == 'train' or args.mode == 'train_test':
cf = utils.prep_exp(args.exp_source, args.exp_dir, args.server_env, args.use_stored_settings)
if args.dev:
folds = [0,1]
cf.batch_size, cf.num_epochs, cf.min_save_thresh, cf.save_n_models = 3 if cf.dim==2 else 1, 1, 0, 2
cf.num_train_batches, cf.num_val_batches, cf.max_val_patients = 5, 1, 1
cf.test_n_epochs = cf.save_n_models
cf.max_test_patients = 2
cf.data_dest = args.data_dest
logger = utils.get_logger(cf.exp_dir, cf.server_env)
logger.info("cudnn benchmark: {}, deterministic: {}.".format(torch.backends.cudnn.benchmark,
torch.backends.cudnn.deterministic))
logger.info("sending tensors to CUDA device: {}.".format(torch.cuda.get_device_name(args.cuda_device)))
data_loader = utils.import_module('dl', os.path.join(args.exp_source, 'data_loader.py'))
model = utils.import_module('model', cf.model_path)
logger.info("loaded model from {}".format(cf.model_path))
if folds is None:
folds = range(cf.n_cv_splits)
with torch.cuda.device(args.cuda_device):
for fold in folds:
cf.fold_dir = os.path.join(cf.exp_dir, 'fold_{}'.format(fold))
cf.fold = fold
cf.resume = args.resume
if not os.path.exists(cf.fold_dir):
os.mkdir(cf.fold_dir)
logger.set_logfile(fold=fold)
train(logger)
cf.resume = False
if args.mode == 'train_test':
test(logger)
#Concatenate test results by detection
if cf.hold_out_test_set == False:
test_frames = [pd.read_pickle(os.path.join(cf.test_dir,f)) for f in os.listdir(cf.test_dir) if '_test_df.pickle' in f]
all_preds = pd.concat(test_frames)
all_preds.to_csv(os.path.join(cf.test_dir,"all_folds_test.csv"))
#Concatenate detection raw boxes across folds
det_frames = [pd.read_pickle(os.path.join(cf.exp_dir,f,'raw_pred_boxes_list.pickle')) for f in os.listdir(cf.exp_dir) if 'fold_' in f]
all_dets=list()
for i in det_frames:
all_dets.extend(i)
with open(os.path.join(cf.exp_dir, 'all_raw_dets.pickle'), 'wb') as handle:
pickle.dump(all_dets, handle)
#Concatenate detection wbc boxes across folds
det_frames = [pd.read_pickle(os.path.join(cf.exp_dir,f,'wbc_pred_boxes_list.pickle')) for f in os.listdir(cf.exp_dir) if 'fold_' in f]
all_dets=list()
for i in det_frames:
all_dets.extend(i)
with open(os.path.join(cf.exp_dir, 'all_wbc_dets.pickle'), 'wb') as handle:
pickle.dump(all_dets, handle)
elif args.mode == 'test':
cf = utils.prep_exp(args.exp_source, args.exp_dir, args.server_env, is_training=False, use_stored_settings=True)
if args.dev:
folds = [0,1]
cf.test_n_epochs = 2; cf.max_test_patients = 2
cf.data_dest = args.data_dest
logger = utils.get_logger(cf.exp_dir, cf.server_env)
data_loader = utils.import_module('dl', os.path.join(args.exp_source, 'data_loader.py'))
model = utils.import_module('model', cf.model_path)
logger.info("loaded model from {}".format(cf.model_path))
if folds is None:
folds = range(cf.n_cv_splits)
with torch.cuda.device(args.cuda_device):
for fold in folds:
cf.fold_dir = os.path.join(cf.exp_dir, 'fold_{}'.format(fold))
cf.fold = fold
logger.set_logfile(fold=fold)
test(logger)
if cf.hold_out_test_set == False:
test_frames = [pd.read_pickle(os.path.join(cf.test_dir,f)) for f in os.listdir(cf.test_dir) if '_test_df.pickle' in f]
all_preds = pd.concat(test_frames)
all_preds.to_csv(os.path.join(cf.test_dir,"all_folds_test.csv"))
#Concatenate detection raw boxes across folds
det_frames = [pd.read_pickle(os.path.join(cf.exp_dir,f,'raw_pred_boxes_list.pickle')) for f in os.listdir(cf.exp_dir) if 'fold_' in f]
all_dets=list()
for i in det_frames:
all_dets.extend(i)
with open(os.path.join(cf.exp_dir, 'all_raw_dets.pickle'), 'wb') as handle:
pickle.dump(all_dets, handle)
#Concatenate detection wbc boxes across folds
det_frames = [pd.read_pickle(os.path.join(cf.exp_dir,f,'wbc_pred_boxes_list.pickle')) for f in os.listdir(cf.exp_dir) if 'fold_' in f]
all_dets=list()
for i in det_frames:
all_dets.extend(i)
with open(os.path.join(cf.exp_dir, 'all_wbc_dets.pickle'), 'wb') as handle:
pickle.dump(all_dets, handle)
# load raw predictions saved by predictor during testing, run aggregation algorithms and evaluation.
elif args.mode == 'analysis':
cf = utils.prep_exp(args.exp_source, args.exp_dir, args.server_env, is_training=False, use_stored_settings=True)
logger = utils.get_logger(cf.exp_dir, cf.server_env)
if args.dev:
cf.test_n_epochs = 2
if cf.hold_out_test_set and cf.ensemble_folds:
# create and save (unevaluated) predictions across all folds
predictor = Predictor(cf, net=None, logger=logger, mode='analysis')
results_list = predictor.load_saved_predictions(apply_wbc=True)
utils.create_csv_output([(res_dict["boxes"], pid) for res_dict, pid in results_list], cf, logger)
logger.info('starting evaluation...')
cf.fold = 'overall_hold_out'
evaluator = Evaluator(cf, logger, mode='test')
evaluator.evaluate_predictions(results_list)
evaluator.score_test_df()
else:
fold_dirs = sorted([os.path.join(cf.exp_dir, f) for f in os.listdir(cf.exp_dir) if
os.path.isdir(os.path.join(cf.exp_dir, f)) and f.startswith("fold")])
if folds is None:
folds = range(cf.n_cv_splits)
for fold in folds:
cf.fold_dir = os.path.join(cf.exp_dir, 'fold_{}'.format(fold))
cf.fold = fold
logger.set_logfile(fold=fold)
if cf.fold_dir in fold_dirs:
predictor = Predictor(cf, net=None, logger=logger, mode='analysis')
results_list = predictor.load_saved_predictions(apply_wbc=True)
logger.info('starting evaluation...')
evaluator = Evaluator(cf, logger, mode='test')
evaluator.evaluate_predictions(results_list)
evaluator.score_test_df()
else:
logger.info("Skipping fold {} since no model parameters found.".format(fold))
# create experiment folder and copy scripts without starting job.
# useful for cloud deployment where configs might change before job actually runs.
elif args.mode == 'create_exp':
cf = utils.prep_exp(args.exp_source, args.exp_dir, args.server_env, use_stored_settings=False)
logger = utils.get_logger(cf.exp_dir)
logger.info('created experiment directory at {}'.format(cf.exp_dir))
else:
raise RuntimeError('mode specified in args is not implemented...')
t = utils.get_formatted_duration(time.time() - stime)
logger.info("{} total runtime: {}".format(os.path.split(__file__)[1], t))
del logger
| [
"torch.no_grad",
"torch.cuda.get_device_name",
"torch.cuda.device",
"torch.optim.lr_scheduler.ReduceLROnPlateau"
] | 1.4.0 | mkim55/BleedDetection | 93eb5c08ab210b76ae554a5d7b3ffc8bdc7e3180 |
1.2 | from __future__ import absolute_import, division, print_function
import argparse
import csv
import logging
import os
import sys
from collections import defaultdict
import pandas as pd
import numpy as np
import torch
from torch.utils.data import (DataLoader, SequentialSampler,
TensorDataset)
from tqdm import tqdm
from transformers import BertTokenizer, BertForSequenceClassification
from utils import convert_examples_new, convert_dataset_to_features
from dataset import HumorDetectionDataset
from model import HumorDetectionModel
from sklearn.metrics import f1_score, precision_score, recall_score
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
            Must only be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r") as f:
reader = csv.reader(f, delimiter=",", quotechar=quotechar)
lines = []
for line in reader:
if sys.version_info[0] == 2:
line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
return lines
class DumbProcessorClean(DataProcessor):
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train_clean.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_clean.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test_clean.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = line[3]
label = line[1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class DumbProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train_wordnet_amb.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_wordnet_amb.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = line[3]
label = line[1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = line[3]
label = line[1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
def load_and_cache_examples(args, tokenizer, ambiguity_fn, task_name):
'''
Loads in a cached file for training and/or builds a cached file for this data
:return:
'''
processors = {
"old": ColaProcessor,
"new_clean": DumbProcessorClean
}
# Build the dataset
task = 'test'
logger.info("Creating features from dataset file at %s", args.data_dir)
if args.old_load:
logger.info('using old data features')
processor = processors[task_name]()
label_list = processor.get_labels()
if args.data_name == 'rJokes':
examples = processor.get_test_examples(args.data_dir)
else:
examples = processor.get_dev_examples(args.data_dir)
features = convert_examples_new(examples, label_list, args.max_seq_length, tokenizer)
input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
input_masks = torch.tensor([f.input_mask for f in features], dtype=torch.long)
token_type_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
labels = torch.tensor([f.label_id for f in features], dtype=torch.long)
dataset = TensorDataset(input_ids, input_masks, token_type_ids, labels)
else:
logger.info("creating features from new dataset")
dataset = HumorDetectionDataset(args.data_dir, args.max_seq_length, task, ambiguity_fn,
use_clean_data=("clean" in task_name))
features = convert_dataset_to_features(dataset, args.max_seq_length, tokenizer)
# convert features to tensor dataset
input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
input_masks = torch.tensor([f.input_mask for f in features], dtype=torch.long)
token_type_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
ambiguity_scores = torch.tensor([f.ambiguity for f in features], dtype=torch.long)
labels = torch.tensor([f.label_id for f in features], dtype=torch.long)
dataset = TensorDataset(input_ids, input_masks, token_type_ids, labels, ambiguity_scores)
logger.info("Features Built.")
return dataset
def accuracy(out, labels):
outputs = np.argmax(out, axis=1)
return np.sum(outputs == labels)
def get_metrics(logits, labels):
# import pdb;pdb.set_trace()
outputs = np.argmax(logits, axis=1)
f1 = f1_score(labels, outputs)
prec = precision_score(labels, outputs)
recall = recall_score(labels, outputs)
return f1, prec, recall
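# Quick sanity sketch with made-up values: predictions are the argmax over the class axis, e.g.
#   logits = np.array([[0.1, 0.9], [0.8, 0.2]]); labels = np.array([1, 1])
#   accuracy(logits, labels)    -> 1   (one of the two argmax predictions matches its label)
#   get_metrics(logits, labels) -> binary f1 / precision / recall on those argmax predictions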
def evaluate(args, model, tokenizer, ambiguity_fn, task_name):
eval_data = load_and_cache_examples(args, tokenizer, ambiguity_fn, task_name)
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_data))
logger.info(" Batch size = %d", args.eval_batch_size)
# Run prediction for full data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
model.eval()
eval_loss, eval_accuracy = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
full_logits = None
full_labels = None
printed_first = False
for batch in tqdm(eval_dataloader, desc="Evaluating"):
batch = tuple(t.to(args.device) for t in batch)
inputs = {'input_ids': batch[0],
'token_type_ids': batch[2],
'attention_mask': batch[1],
'labels': batch[3]}
if not printed_first:
for i in range(3):
print("Tokens: ", tokenizer.convert_ids_to_tokens(inputs["input_ids"][i]))
print("Token type ids: ", inputs["token_type_ids"][i])
print("Attn mask: ", inputs["attention_mask"][i])
print("Label: ", inputs["labels"][i])
printed_first = True
if not args.bert_base:
inputs['ambiguity_scores'] = batch[4]
with torch.no_grad():
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
logits = logits.detach().cpu().numpy()
label_ids = inputs['labels'].to('cpu').numpy()
# combine the labels for F1 scores
if full_labels is None:
full_labels = label_ids
else:
full_labels = np.append(full_labels, label_ids, axis=0)
if full_logits is None:
full_logits = logits
else:
full_logits = np.append(full_logits, logits, axis=0)
tmp_eval_accuracy = accuracy(logits, label_ids)
eval_loss += tmp_eval_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += inputs['input_ids'].size(0)
nb_eval_steps += 1
eval_f1, eval_precision, eval_recall = get_metrics(full_logits, full_labels)
full_accuracy = accuracy(full_logits, full_labels)
eval_loss = eval_loss / nb_eval_steps
eval_accuracy = eval_accuracy / nb_eval_examples
results = {
'acc' : eval_accuracy,
'precision' : eval_precision,
'recall' : eval_recall,
'f1' : eval_f1,
'loss' : eval_loss
}
return results
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
"bert-base-multilingual-cased, bert-base-chinese.")
# Other parameters
parser.add_argument("--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_lower_case",
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--train_batch_size",
default=16,
type=int,
help="Total batch size for training.")
parser.add_argument("--eval_batch_size",
default=8,
type=int,
help="Total batch size for eval.")
parser.add_argument("--no_cuda",
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--old_load', action='store_true')
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--loss_scale',
type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
parser.add_argument("--overwrite_cache", action='store_true')
parser.add_argument('--bert_base', action='store_true', default=False,
help='loads in bert-base instead of our custom model.')
parser.add_argument('--model_weights', required=True, help="Path to model weights, if loading a saved model. "
"If you wish to evaluate multiple models, separate with commas (no spaces). "
"Models must differ ONLY in random seed and/or ambiguity_fn.")
args = parser.parse_args()
args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
args.device, args.n_gpu, bool(args.local_rank != -1), args.fp16))
tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
ambiguity_fn = "none"
if "_csi_" in args.model_weights:
ambiguity_fn = "csi"
elif "_wn_" in args.model_weights:
ambiguity_fn = "wn"
elif "_tf-idf_" in args.model_weights:
ambiguity_fn = "tf-idf"
if args.bert_base:
model = BertForSequenceClassification.from_pretrained(args.bert_model, num_labels=2).to(args.device)
else:
use_ambiguity = ambiguity_fn != "none"
model = HumorDetectionModel(rnn_size=768, use_ambiguity=use_ambiguity).to(args.device)
# Loop through 3 Test sets
out_class = None
task_name = 'new_clean'
datasets = ['rJokes']#, 'puns']#, 'short_jokes']
base_dir = args.data_dir
output = []
for data_dir in datasets:
if data_dir == 'rJokes':
args.data_dir = base_dir
task_name = 'new_clean'
else:
args.data_dir = os.path.join(base_dir, data_dir)
task_name = 'old'
args.data_name = data_dir
set_results = defaultdict(float)
logger.info('****** Evaluating on {}'.format(data_dir))
seeds = args.model_weights.split(",")
for weights_path in seeds:
state_dict = torch.load(weights_path)
model.load_state_dict(state_dict)
print(f"Evaluating model: {weights_path}")
results = evaluate(args, model, tokenizer, ambiguity_fn, task_name)
out_class = weights_path
# update rolling
for metric, vals in results.items():
set_results[metric] += vals
# average
logger.info('***** Averaged Results for {}'.format(data_dir))
for metric, vals in set_results.items():
set_results[metric] = vals / len(seeds)
logger.info('***** {}: {}'.format(metric, set_results[metric]))
output.append([data_dir, set_results['acc'], set_results['precision'],
set_results['recall'], set_results['f1'], set_results['loss']])
# Write output to file
save_dir = 'test_results'
if not os.path.isdir(save_dir):
os.mkdir(save_dir)
#table = pd.DataFrame(output, columns=['name', 'acc', 'precision', 'recall', 'f1', 'loss']).set_index('name')
#out_file = 'test_results_{}'.format(out_class[:-2])
#table.to_csv(os.path.join(save_dir, out_file))
return
if __name__ == "__main__":
main()
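# Hypothetical invocation sketch (paths are placeholders, not taken from the original file):
#   python test.py --data_dir data --bert_model bert-base-uncased --do_lower_case \
#       --model_weights runs/model_wn_seed1.pt,runs/model_wn_seed2.pt
# The ambiguity function is inferred from the weight file names (e.g. "_wn_" selects WordNet),
# and metrics are averaged over all comma-separated weight paths.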
| [
"torch.no_grad",
"torch.utils.data.SequentialSampler",
"torch.cuda.device_count",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.tensor",
"torch.load",
"torch.utils.data.TensorDataset"
] | 1.2.0 | derosejf/RedditHumorDetection | 48d93df7df82a8cab0fb3981a74b4c9448e7b555 |
1.0 | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT-2 model."""
import os
import warnings
from dataclasses import dataclass
from typing import List, Optional, Tuple
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss, MSELoss
from .activations import ACT2FN
from .configuration_gpt2 import GPT2Config
from .file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from .modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
from .modeling_utils import (
Conv1D,
PreTrainedModel,
SequenceSummary,
find_pruneable_heads_and_indices,
prune_conv1d_layer,
)
from .utils import logging
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "GPT2Config"
_TOKENIZER_FOR_DOC = "GPT2Tokenizer"
GPT2_PRETRAINED_MODEL_ARCHIVE_LIST = [
"gpt2",
"gpt2-medium",
"gpt2-large",
"gpt2-xl",
"distilgpt2",
# See all GPT-2 models at https://huggingface.co/models?filter=gpt2
]
def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):
"""Load tf checkpoints in a pytorch model"""
try:
import re
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(gpt2_checkpoint_path)
logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array.squeeze())
for name, array in zip(names, arrays):
name = name[6:] # skip "model/"
name = name.split("/")
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+\d+", m_name):
scope_names = re.split(r"(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "w" or scope_names[0] == "g":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "b":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "wpe" or scope_names[0] == "wte":
pointer = getattr(pointer, scope_names[0])
pointer = getattr(pointer, "weight")
else:
pointer = getattr(pointer, scope_names[0])
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
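# Hypothetical usage sketch (the checkpoint path is a placeholder assumption):
#   config = GPT2Config()
#   model = GPT2Model(config)
#   load_tf_weights_in_gpt2(model, config, "/path/to/openai_gpt2_tf_checkpoint")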
class Attention(nn.Module):
def __init__(self, nx, n_ctx, config, scale=False, is_cross_attention=False):
super().__init__()
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert n_state % config.n_head == 0
self.register_buffer(
"bias", torch.tril(torch.ones((n_ctx, n_ctx), dtype=torch.uint8)).view(1, 1, n_ctx, n_ctx)
)
self.register_buffer("masked_bias", torch.tensor(-1e4))
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.is_cross_attention = is_cross_attention
if self.is_cross_attention:
self.c_attn = Conv1D(2 * n_state, nx)
self.q_attn = Conv1D(n_state, nx)
else:
self.c_attn = Conv1D(3 * n_state, nx)
self.c_proj = Conv1D(n_state, nx)
self.attn_dropout = nn.Dropout(config.attn_pdrop)
self.resid_dropout = nn.Dropout(config.resid_pdrop)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.n_head, self.split_size // self.n_head, self.pruned_heads
)
index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])
# Prune conv1d layers
self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
# Update hyper params
self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads))
self.n_head = self.n_head - len(heads)
self.pruned_heads = self.pruned_heads.union(heads)
def _attn(self, q, k, v, attention_mask=None, head_mask=None, output_attentions=False):
w = torch.matmul(q, k)
if self.scale:
w = w / (float(v.size(-1)) ** 0.5)
nd, ns = w.size(-2), w.size(-1)
if not self.is_cross_attention:
# if only "normal" attention layer implements causal mask
mask = self.bias[:, :, ns - nd : ns, :ns]
w = torch.where(mask.bool(), w, self.masked_bias.to(w.dtype))
if attention_mask is not None:
# Apply the attention mask
w = w + attention_mask
w = nn.Softmax(dim=-1)(w)
w = self.attn_dropout(w)
# Mask heads if we want to
if head_mask is not None:
w = w * head_mask
outputs = [torch.matmul(w, v)]
if output_attentions:
outputs.append(w)
return outputs
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states
def split_heads(self, x, k=False):
new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states
if k:
return x.permute(0, 2, 3, 1) # (batch, head, head_features, seq_length)
else:
return x.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
def forward(
self,
hidden_states,
layer_past=None,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
use_cache=False,
output_attentions=False,
):
if encoder_hidden_states is not None:
assert hasattr(
self, "q_attn"
), "If class is used as cross attention, the weights `q_attn` have to be defined. Please make sure to instantiate class with `Attention(..., is_cross_attention=True)`."
query = self.q_attn(hidden_states)
key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2)
attention_mask = encoder_attention_mask
else:
query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2)
query = self.split_heads(query)
key = self.split_heads(key, k=True)
value = self.split_heads(value)
if layer_past is not None:
past_key, past_value = layer_past[0].transpose(-2, -1), layer_past[1] # transpose back cf below
key = torch.cat((past_key, key), dim=-1)
value = torch.cat((past_value, value), dim=-2)
if use_cache is True:
present = torch.stack((key.transpose(-2, -1), value)) # transpose to have same shapes for stacking
else:
present = (None,)
attn_outputs = self._attn(query, key, value, attention_mask, head_mask, output_attentions)
a = attn_outputs[0]
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a)
outputs = [a, present] + attn_outputs[1:]
return outputs # a, present, (attentions)
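# Shape note for Attention.forward (descriptive; B = batch, T = sequence length, E = n_embd):
# hidden_states (B, T, E) is projected to query/key/value and split into n_head heads of size
# E // n_head. When use_cache=True, `present` stacks the (key, value) tensors so that later
# calls can pass them back in as `layer_past` and only feed the newly generated tokens.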
class MLP(nn.Module):
def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)
super().__init__()
nx = config.n_embd
self.c_fc = Conv1D(n_state, nx)
self.c_proj = Conv1D(nx, n_state)
self.act = ACT2FN[config.activation_function]
self.dropout = nn.Dropout(config.resid_pdrop)
def forward(self, x):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
return self.dropout(h2)
class Block(nn.Module):
def __init__(self, n_ctx, config, scale=False):
super().__init__()
hidden_size = config.n_embd
inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size
self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
self.attn = Attention(hidden_size, n_ctx, config, scale)
self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
if config.add_cross_attention:
self.crossattention = Attention(hidden_size, n_ctx, config, scale, is_cross_attention=True)
self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
self.mlp = MLP(inner_dim, config)
def forward(
self,
hidden_states,
layer_past=None,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
use_cache=False,
output_attentions=False,
):
attn_outputs = self.attn(
self.ln_1(hidden_states),
layer_past=layer_past,
attention_mask=attention_mask,
head_mask=head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
)
attn_output = attn_outputs[0] # output_attn: a, present, (attentions)
outputs = attn_outputs[1:]
# residual connection
hidden_states = attn_output + hidden_states
if encoder_hidden_states is not None:
# add one self-attention block for cross-attention
assert hasattr(
self, "crossattention"
), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
cross_attn_outputs = self.crossattention(
self.ln_cross_attn(hidden_states),
attention_mask=attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
)
attn_output = cross_attn_outputs[0]
# residual connection
hidden_states = hidden_states + attn_output
outputs = outputs + cross_attn_outputs[1:] # add cross attentions if we output attention weights
feed_forward_hidden_states = self.mlp(self.ln_2(hidden_states))
# residual connection
hidden_states = hidden_states + feed_forward_hidden_states
outputs = [hidden_states] + outputs
return outputs # hidden_states, present, (cross_attentions, attentions)
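# Block is a pre-LayerNorm transformer layer: LN -> self-attention -> residual add, then an
# optional LN -> cross-attention -> residual add (only when config.add_cross_attention is set
# and encoder_hidden_states are given), and finally LN -> MLP -> residual add.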
class GPT2PreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = GPT2Config
load_tf_weights = load_tf_weights_in_gpt2
base_model_prefix = "transformer"
def __init__(self, *inputs, **kwargs):
super().__init__(*inputs, **kwargs)
def _init_weights(self, module):
"""Initialize the weights."""
if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
@dataclass
class GPT2DoubleHeadsModelOutput(ModelOutput):
"""
Base class for outputs of models predicting if two sentences are consecutive or not.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided):
Language modeling loss.
mc_loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`mc_labels` is provided):
Multiple choice classification loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
mc_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):
Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
past_key_values (:obj:`List[torch.FloatTensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
List of :obj:`torch.FloatTensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2,
batch_size, num_heads, sequence_length, embed_size_per_head)`).
Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
:obj:`past_key_values` input) to speed up sequential decoding.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
mc_loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
mc_logits: torch.FloatTensor = None
past_key_values: Optional[List[torch.FloatTensor]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
GPT2_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
    methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.GPT2Config`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
GPT2_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, input_ids_length)`):
:obj:`input_ids_length` = ``sequence_length`` if :obj:`past_key_values` is ``None`` else
``past_key_values[0].shape[-2]`` (``sequence_length`` of input past key value states). Indices of input
sequence tokens in the vocabulary.
If :obj:`past_key_values` is used, only ``input_ids`` that do not have their past calculated should be
passed as ``input_ids``.
Indices can be obtained using :class:`~transformers.GPT2Tokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
past_key_values (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see
:obj:`past_key_values` output below). Can be used to speed up sequential decoding. The ``input_ids`` which
have their past given to this model should not be passed as ``input_ids`` as they have already been
computed.
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, input_ids_length)`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
If :obj:`past_key_values` is used, optionally only the last :obj:`inputs_embeds` have to be input (see
:obj:`past_key_values`).
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top.",
GPT2_START_DOCSTRING,
)
class GPT2Model(GPT2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.wte = nn.Embedding(config.vocab_size, config.n_embd)
self.wpe = nn.Embedding(config.n_positions, config.n_embd)
self.drop = nn.Dropout(config.embd_pdrop)
self.h = nn.ModuleList([Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)])
self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
self.init_weights()
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, new_embeddings):
self.wte = new_embeddings
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
"""
for layer, heads in heads_to_prune.items():
self.h[layer].attn.prune_heads(heads)
@add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="gpt2",
output_type=BaseModelOutputWithPast,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**kwargs,
):
if "past" in kwargs:
warnings.warn(
"The `past` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
past_key_values = kwargs.pop("past")
assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
batch_size = input_ids.shape[0]
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size = inputs_embeds.shape[0]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, input_shape[-1])
if position_ids is not None:
position_ids = position_ids.view(-1, input_shape[-1])
if past_key_values is None:
past_length = 0
past_key_values = [None] * len(self.h)
else:
past_length = past_key_values[0][0].size(-2)
if position_ids is None:
device = input_ids.device if input_ids is not None else inputs_embeds.device
position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
# Attention mask.
if attention_mask is not None:
assert batch_size > 0, "batch_size has to be defined and > 0"
attention_mask = attention_mask.view(batch_size, -1)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is simpler than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
attention_mask = attention_mask[:, None, None, :]
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
attention_mask = (1.0 - attention_mask) * -10000.0
# If a 2D or 3D attention mask is provided for the cross-attention,
# we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.add_cross_attention and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# head_mask has shape n_layer x batch x n_heads x N x N
head_mask = self.get_head_mask(head_mask, self.config.n_layer)
if inputs_embeds is None:
inputs_embeds = self.wte(input_ids)
position_embeds = self.wpe(position_ids)
hidden_states = inputs_embeds + position_embeds
if token_type_ids is not None:
token_type_embeds = self.wte(token_type_ids)
hidden_states = hidden_states + token_type_embeds
hidden_states = self.drop(hidden_states)
output_shape = input_shape + (hidden_states.size(-1),)
presents = () if use_cache else None
all_attentions = () if output_attentions else None
all_hidden_states = () if output_hidden_states else None
for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),)
if getattr(self.config, "gradient_checkpointing", False):
def create_custom_forward(module):
def custom_forward(*inputs):
# checkpointing only works with tuple returns, not with lists
return tuple(output for output in module(*inputs, use_cache, output_attentions))
return custom_forward
outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(block),
hidden_states,
layer_past,
attention_mask,
head_mask[i],
encoder_hidden_states,
encoder_attention_mask,
)
else:
outputs = block(
hidden_states,
layer_past=layer_past,
attention_mask=attention_mask,
head_mask=head_mask[i],
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states, present = outputs[:2]
if use_cache is True:
presents = presents + (present,)
if output_attentions:
all_attentions = all_attentions + (outputs[2],)
hidden_states = self.ln_f(hidden_states)
hidden_states = hidden_states.view(*output_shape)
# Add last hidden state
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, presents, all_hidden_states, all_attentions] if v is not None)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=presents,
hidden_states=all_hidden_states,
attentions=all_attentions,
)
@add_start_docstrings(
"""
The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input
embeddings).
""",
GPT2_START_DOCSTRING,
)
class GPT2LMHeadModel(GPT2PreTrainedModel):
authorized_missing_keys = [r"h\.\d+\.attn\.masked_bias", r"lm_head\.weight"]
def __init__(self, config):
super().__init__(config)
self.transformer = GPT2Model(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.init_weights()
def get_output_embeddings(self):
return self.lm_head
def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
# only last token for inputs_ids if past is defined in kwargs
if past:
input_ids = input_ids[:, -1].unsqueeze(-1)
attention_mask = kwargs.get("attention_mask", None)
position_ids = kwargs.get("position_ids", None)
if attention_mask is not None and position_ids is None:
# create position_ids on the fly for batch generation
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
if past:
position_ids = position_ids[:, -1].unsqueeze(-1)
else:
position_ids = None
return {
"input_ids": input_ids,
"past_key_values": past,
"use_cache": kwargs.get("use_cache"),
"position_ids": position_ids,
"attention_mask": attention_mask,
}
@add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="gpt2",
output_type=CausalLMOutputWithPast,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**kwargs,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
``labels = input_ids``. Indices are selected in ``[-100, 0, ..., config.vocab_size]``. All labels set to
``-100`` are ignored (masked); the loss is only computed for labels in ``[0, ..., config.vocab_size]``.
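Example (a minimal sketch; assumes the public ``gpt2`` checkpoint can be downloaded)::
>>> from transformers import GPT2Tokenizer, GPT2LMHeadModel
>>> tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
>>> model = GPT2LMHeadModel.from_pretrained('gpt2', return_dict=True)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs, labels=inputs["input_ids"])  # the label shift happens inside the model
>>> loss, lm_logits = outputs.loss, outputs.logits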
"""
if "past" in kwargs:
warnings.warn(
"The `past` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
past_key_values = kwargs.pop("past")
assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
if not return_dict:
output = (lm_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return CausalLMOutputWithPast(
loss=loss,
logits=lm_logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
@add_start_docstrings(
"""
The GPT2 Model transformer with a language modeling and a multiple-choice classification head on top e.g. for
RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the
input embeddings, and the classification head takes as input the hidden state at a specified classification token
index in the input sequence.
""",
GPT2_START_DOCSTRING,
)
class GPT2DoubleHeadsModel(GPT2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
config.num_labels = 1
self.transformer = GPT2Model(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.multiple_choice_head = SequenceSummary(config)
self.init_weights()
def get_output_embeddings(self):
return self.lm_head
def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
# only last token for inputs_ids if past is defined in kwargs
if past:
input_ids = input_ids[:, -1].unsqueeze(-1)
return {
"input_ids": input_ids,
"past_key_values": past,
"use_cache": kwargs.get("use_cache"),
}
@add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=GPT2DoubleHeadsModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
mc_token_ids=None,
labels=None,
mc_labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**kwargs,
):
r"""
mc_token_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, num_choices)`, `optional`, defaults to index of the last token of the input):
Index of the classification token in each input sequence. Selected in the range ``[0, input_ids.size(-1) -
1]``.
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
``labels = input_ids``. Indices are selected in ``[-100, 0, ..., config.vocab_size]``. All labels set to
``-100`` are ignored (masked); the loss is only computed for labels in ``[0, ..., config.vocab_size]``.
mc_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices]`` where `num_choices` is the size of the second dimension of the input tensors. (see
`input_ids` above)
kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
Used to hide legacy arguments that have been deprecated.
Return:
Example::
>>> import torch
>>> from transformers import GPT2Tokenizer, GPT2DoubleHeadsModel
>>> tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
>>> model = GPT2DoubleHeadsModel.from_pretrained('gpt2', return_dict=True)
>>> # Add a [CLS] to the vocabulary (we should train it also!)
>>> num_added_tokens = tokenizer.add_special_tokens({'cls_token': '[CLS]'})
>>> embedding_layer = model.resize_token_embeddings(len(tokenizer)) # Update the model embeddings with the new vocabulary size
>>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
>>> encoded_choices = [tokenizer.encode(s) for s in choices]
>>> cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices]
>>> input_ids = torch.tensor(encoded_choices).unsqueeze(0) # Batch size: 1, number of choices: 2
>>> mc_token_ids = torch.tensor([cls_token_location]) # Batch size: 1
>>> outputs = model(input_ids, mc_token_ids=mc_token_ids)
>>> lm_logits = outputs.logits
>>> mc_logits = outputs.mc_logits
"""
if "lm_labels" in kwargs:
warnings.warn(
"The `lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.",
FutureWarning,
)
labels = kwargs.pop("lm_labels")
if "past" in kwargs:
warnings.warn(
"The `past` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
past_key_values = kwargs.pop("past")
assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)
mc_loss = None
if mc_labels is not None:
loss_fct = CrossEntropyLoss()
mc_loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1))
lm_loss = None
if labels is not None:
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
loss_fct = CrossEntropyLoss()
lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
if not return_dict:
output = (lm_logits, mc_logits) + transformer_outputs[1:]
if mc_loss is not None:
output = (mc_loss,) + output
return ((lm_loss,) + output) if lm_loss is not None else output
return GPT2DoubleHeadsModelOutput(
loss=lm_loss,
mc_loss=mc_loss,
logits=lm_logits,
mc_logits=mc_logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
@add_start_docstrings(
"""
The GPT2 Model transformer with a sequence classification head on top (linear layer).
:class:`~transformers.GPT2ForSequenceClassification` uses the last token in order to do the classification, as
other causal models (e.g. GPT-1) do.
Since it does classification on the last token, it needs to know the position of that token. If a
:obj:`pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each
row. If no :obj:`pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot
guess the padding tokens when :obj:`inputs_embeds` are passed instead of :obj:`input_ids`, it does the same (takes
the last value in each row of the batch).
""",
GPT2_START_DOCSTRING,
)
class GPT2ForSequenceClassification(GPT2PreTrainedModel):
authorized_missing_keys = [r"h\.\d+\.attn\.masked_bias", r"lm_head\.weight"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = GPT2Model(config)
self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)
self.init_weights()
@add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="microsoft/dialogrpt",
output_type=SequenceClassifierOutputWithPast,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
logits = self.score(hidden_states)
if input_ids is not None:
batch_size, sequence_length = input_ids.shape[:2]
else:
batch_size, sequence_length = inputs_embeds.shape[:2]
assert (
self.config.pad_token_id is not None or batch_size == 1
), "Cannot handle batch sizes > 1 if no padding token is defined."
if self.config.pad_token_id is None:
sequence_lengths = -1
else:
if input_ids is not None:
sequence_lengths = torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1
else:
sequence_lengths = -1
logger.warning(
f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
f"unexpected if using padding tokens in conjunction with `inputs_embeds.`"
)
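# Pool by taking the logits at the last non-padding position of each row
# (or simply the last position when no pad token is defined or only embeddings were passed).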
pooled_logits = logits[range(batch_size), sequence_lengths]
loss = None
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(pooled_logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (pooled_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutputWithPast(
loss=loss,
logits=pooled_logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.cat",
"torch.nn.LayerNorm",
"torch.nn.MSELoss",
"torch.nn.Softmax",
"torch.arange",
"torch.nn.CrossEntropyLoss",
"torch.ne",
"torch.from_numpy",
"torch.ones",
"torch.tensor",
"torch.matmul",
"torch.nn.Embedding"
] | 1.0 | gp201/transformers | 89f2781e87e92b04303f7f128107718e44e755ed |
1.0 | from typing import Optional, Union, List, Callable
import logging
import torch
from torch.distributions import Poisson, Gamma, Bernoulli, Normal
from torch.utils.data import DataLoader
import numpy as np
import pandas as pd
from scipy.stats import spearmanr
from scvi.inference import Posterior
from . import UnsupervisedTrainer
from scvi.dataset import GeneExpressionDataset
from scvi.models import TOTALVI, Classifier
from scvi.models.utils import one_hot
logger = logging.getLogger(__name__)
class TotalPosterior(Posterior):
r"""The functional data unit for totalVI. A `TotalPosterior` instance is instantiated with a model and
a `gene_dataset`, and as well as additional arguments that for Pytorch's `DataLoader`. A subset of indices
can be specified, for purposes such as splitting the data into train/test/validation. Each trainer instance of the `TotalTrainer` class can therefore have multiple
`TotalPosterior` instances to train a model. A `TotalPosterior` instance also comes with many methods or
utilities for its corresponding data.
:param model: A model instance from class ``TOTALVI``
:param gene_dataset: A gene_dataset instance like ``CbmcDataset()`` with attribute ``protein_expression``
:param shuffle: Specifies if a `RandomSampler` or a `SequentialSampler` should be used
:param indices: Specifies how the data should be split with regards to train/test or labelled/unlabelled
:param use_cuda: Default: ``True``
:param data_loader_kwargs: Keyword arguments to pass into the `DataLoader`
Examples:
Let us instantiate a `trainer`, with a gene_dataset and a model
>>> gene_dataset = CbmcDataset()
>>> totalvi = TOTALVI(gene_dataset.nb_genes, len(gene_dataset.protein_names),
... n_batch=gene_dataset.n_batches, use_cuda=True)
>>> trainer = TotalTrainer(totalvi, gene_dataset)
>>> trainer.train(n_epochs=400)
"""
def __init__(
self,
model: TOTALVI,
gene_dataset: GeneExpressionDataset,
shuffle: bool = False,
indices: Optional[np.ndarray] = None,
use_cuda: bool = True,
data_loader_kwargs=dict(),
):
super().__init__(
model,
gene_dataset,
shuffle=shuffle,
indices=indices,
use_cuda=use_cuda,
data_loader_kwargs=data_loader_kwargs,
)
# Add protein tensor as another tensor to be loaded
self.data_loader_kwargs.update(
{
"collate_fn": gene_dataset.collate_fn_builder(
{"protein_expression": np.float32}
)
}
)
self.data_loader = DataLoader(gene_dataset, **self.data_loader_kwargs)
def corrupted(self):
return self.update(
{
"collate_fn": self.gene_dataset.collate_fn_builder(
{"protein_expression": np.float32}, corrupted=True
)
}
)
def uncorrupted(self):
return self.update(
{
"collate_fn": self.gene_dataset.collate_fn_builder(
{"protein_expression": np.float32}
)
}
)
@torch.no_grad()
def elbo(self):
elbo = self.compute_elbo(self.model)
return elbo
elbo.mode = "min"
@torch.no_grad()
def reconstruction_error(self, mode="total"):
ll_gene, ll_protein = self.compute_reconstruction_error(self.model)
if mode == "total":
return ll_gene + ll_protein
elif mode == "gene":
return ll_gene
else:
return ll_protein
reconstruction_error.mode = "min"
@torch.no_grad()
def marginal_ll(self, n_mc_samples=1000):
ll = self.compute_marginal_log_likelihood()
return ll
@torch.no_grad()
def get_protein_background_mean(self):
background_mean = []
for tensors in self:
x, _, _, batch_index, label, y = tensors
outputs = self.model.inference(
x, y, batch_index=batch_index, label=label, n_samples=1
)
b_mean = outputs["py_"]["rate_back"]
background_mean += [np.array(b_mean.cpu())]
return np.concatenate(background_mean)
def compute_elbo(self, vae: TOTALVI, **kwargs):
""" Computes the ELBO.
The ELBO is the reconstruction error + the KL divergences
between the variational distributions and the priors.
It differs from the marginal log likelihood.
Specifically, it is a lower bound on the marginal log likelihood
plus a term that is constant with respect to the variational distribution.
It still gives good insights on the modeling of the data, and is fast to compute.
"""
# Iterate once over the posterior and computes the total log_likelihood
elbo = 0
for i_batch, tensors in enumerate(self):
x, local_l_mean, local_l_var, batch_index, labels, y = tensors
(
reconst_loss_gene,
reconst_loss_protein,
kl_div_z,
kl_div_gene_l,
kl_div_back_pro,
) = vae(
x,
y,
local_l_mean,
local_l_var,
batch_index=batch_index,
label=labels,
**kwargs,
)
elbo += torch.sum(
reconst_loss_gene
+ reconst_loss_protein
+ kl_div_z
+ kl_div_gene_l
+ kl_div_back_pro
).item()
n_samples = len(self.indices)
return elbo / n_samples
def compute_reconstruction_error(self, vae: TOTALVI, **kwargs):
r""" Computes log p(x/z), which is the reconstruction error .
Differs from the marginal log likelihood, but still gives good
insights on the modeling of the data, and is fast to compute
This is really a helper function to self.ll, self.ll_protein, etc.
"""
# Iterate once over the posterior and computes the total log_likelihood
log_lkl_gene = 0
log_lkl_protein = 0
for i_batch, tensors in enumerate(self):
x, local_l_mean, local_l_var, batch_index, labels, y = tensors
(
reconst_loss_gene,
reconst_loss_protein,
kl_div_z,
kl_div_l_gene,
kl_div_back_pro,
) = vae(
x,
y,
local_l_mean,
local_l_var,
batch_index=batch_index,
label=labels,
**kwargs,
)
log_lkl_gene += torch.sum(reconst_loss_gene).item()
log_lkl_protein += torch.sum(reconst_loss_protein).item()
n_samples = len(self.indices)
return log_lkl_gene / n_samples, log_lkl_protein / n_samples
def compute_marginal_log_likelihood(
self, n_samples_mc: int = 100, batch_size: int = 96
):
""" Computes a biased estimator for log p(x, y), which is the marginal log likelihood.
Despite its bias, the estimator still converges to the real value
of log p(x, y) when n_samples_mc (for Monte Carlo) goes to infinity
(a fairly high value like 100 should be enough). 5000 is the standard in machine learning publications.
Due to the Monte Carlo sampling, this method is not as computationally efficient
as computing only the reconstruction loss
"""
# Uses MC sampling to compute a tighter lower bound on log p(x)
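# For each cell, n_samples_mc joint samples of (z, library, background mean) are drawn from the
# variational posterior; the log importance weights log p(x, y, z, l, mu) - log q(z, l, mu | x, y)
# are accumulated in `to_sum` and combined with a log-sum-exp (minus log n_samples_mc) below.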
log_lkl = 0
for i_batch, tensors in enumerate(self.update({"batch_size": batch_size})):
x, local_l_mean, local_l_var, batch_index, labels, y = tensors
to_sum = torch.zeros(x.size()[0], n_samples_mc)
for i in range(n_samples_mc):
# Distribution parameters and sampled variables
outputs = self.model.inference(x, y, batch_index, labels)
qz_m = outputs["qz_m"]
qz_v = outputs["qz_v"]
ql_m = outputs["ql_m"]
ql_v = outputs["ql_v"]
px_ = outputs["px_"]
py_ = outputs["py_"]
log_library = outputs["untran_l"]
# really need not softmax transformed random variable
z = outputs["untran_z"]
log_pro_back_mean = outputs["log_pro_back_mean"]
# Reconstruction Loss
(
reconst_loss_gene,
reconst_loss_protein,
) = self.model.get_reconstruction_loss(x, y, px_, py_)
# Log-probabilities
p_l_gene = (
Normal(local_l_mean, local_l_var.sqrt())
.log_prob(log_library)
.sum(dim=-1)
)
p_z = Normal(0, 1).log_prob(z).sum(dim=-1)
p_mu_back = self.model.back_mean_prior.log_prob(log_pro_back_mean).sum(
dim=-1
)
p_xy_zl = -(reconst_loss_gene + reconst_loss_protein)
q_z_x = Normal(qz_m, qz_v.sqrt()).log_prob(z).sum(dim=-1)
q_l_x = Normal(ql_m, ql_v.sqrt()).log_prob(log_library).sum(dim=-1)
q_mu_back = (
Normal(py_["back_alpha"], py_["back_beta"])
.log_prob(log_pro_back_mean)
.sum(dim=-1)
)
to_sum[:, i] = (
p_z + p_l_gene + p_mu_back + p_xy_zl - q_z_x - q_l_x - q_mu_back
)
batch_log_lkl = torch.logsumexp(to_sum, dim=-1) - np.log(n_samples_mc)
log_lkl += torch.sum(batch_log_lkl).item()
n_samples = len(self.indices)
# The minus sign is there because we actually look at the negative log likelihood
return -log_lkl / n_samples
@torch.no_grad()
def get_latent(self, sample: bool = False):
"""
Output posterior z mean or sample, batch index, and label
:param sample: z mean or z sample
:return: 4-tuple of np.ndarrays, latent, batch_indices, labels, library_gene
"""
latent = []
batch_indices = []
labels = []
library_gene = []
for tensors in self:
x, local_l_mean, local_l_var, batch_index, label, y = tensors
give_mean = not sample
latent += [
self.model.sample_from_posterior_z(
x, y, batch_index, give_mean=give_mean
).cpu()
]
batch_indices += [batch_index.cpu()]
labels += [label.cpu()]
library_gene += [
self.model.sample_from_posterior_l(
x, y, batch_index, give_mean=give_mean
).cpu()
]
return (
np.array(torch.cat(latent)),
np.array(torch.cat(batch_indices)),
np.array(torch.cat(labels)).ravel(),
np.array(torch.cat(library_gene)).ravel(),
)
@torch.no_grad()
def differential_expression_stats(self, M_sampling: int = 100):
raise NotImplementedError
@torch.no_grad()
def generate(
self, n_samples: int = 100, batch_size: int = 64
): # with n_samples>1 return original list/ otherwise sequential
"""
Return samples from posterior predictive. Proteins are concatenated to genes.
:param n_samples: Number of posterior predictive samples
:return: Tuple of posterior samples, original data
"""
original_list = []
posterior_list = []
for tensors in self.update({"batch_size": batch_size}):
x, _, _, batch_index, labels, y = tensors
with torch.no_grad():
outputs = self.model.inference(
x, y, batch_index=batch_index, label=labels, n_samples=n_samples
)
px_ = outputs["px_"]
py_ = outputs["py_"]
pi = 1 / (1 + torch.exp(-py_["mixing"]))
mixing_sample = Bernoulli(pi).sample()
protein_rate = (
py_["rate_fore"] * (1 - mixing_sample)
+ py_["rate_back"] * mixing_sample
)
rate = torch.cat((px_["rate"], protein_rate), dim=-1)
if len(px_["r"].size()) == 2:
px_dispersion = px_["r"]
else:
px_dispersion = torch.ones_like(x) * px_["r"]
if len(py_["r"].size()) == 2:
py_dispersion = py_["r"]
else:
py_dispersion = torch.ones_like(y) * py_["r"]
dispersion = torch.cat((px_dispersion, py_dispersion), dim=-1)
# This gamma is really l*w using scVI manuscript notation
p = rate / (rate + dispersion)
r = dispersion
l_train = Gamma(r, (1 - p) / p).sample()
data = Poisson(l_train).sample().cpu().numpy()
# """
# In numpy (shape, scale) => (concentration, rate), with scale = p /(1 - p)
# rate = (1 - p) / p # = 1/scale # used in pytorch
# """
original_list += [np.array(torch.cat((x, y), dim=-1).cpu())]
posterior_list += [data]
posterior_list[-1] = np.transpose(posterior_list[-1], (1, 2, 0))
return (
np.concatenate(posterior_list, axis=0),
np.concatenate(original_list, axis=0),
)
@torch.no_grad()
def get_sample_dropout(self, n_samples: int = 1, give_mean: bool = True):
""" Zero-inflation mixing component for genes
"""
px_dropouts = []
for tensors in self:
x, _, _, batch_index, label, y = tensors
outputs = self.model.inference(
x, y, batch_index=batch_index, label=label, n_samples=n_samples
)
px_dropout = torch.sigmoid(outputs["px_"]["dropout"])
px_dropouts += [px_dropout.cpu()]
if n_samples > 1:
# concatenate along batch dimension -> result shape = (samples, cells, features)
px_dropouts = torch.cat(px_dropouts, dim=1)
# (cells, features, samples)
px_dropouts = px_dropouts.permute(1, 2, 0)
else:
px_dropouts = torch.cat(px_dropouts, dim=0)
if give_mean is True and n_samples > 1:
px_dropouts = torch.mean(px_dropouts, dim=-1)
px_dropouts = px_dropouts.cpu().numpy()
return px_dropouts
@torch.no_grad()
def get_sample_mixing(
self,
n_samples: int = 1,
give_mean: bool = True,
transform_batch: Optional[int] = None,
):
""" Returns mixing bernoulli parameter for protein negative binomial mixtures (probability background)
:param n_samples: number of samples from posterior distribution
:param sample_protein_mixing: Sample mixing bernoulli, setting background to zero
:param give_mean: bool, whether to return samples along first axis or average over samples
:param transform_batch: Batches to condition on as integer.
:return: array of probability background
:rtype: :py:class:`np.ndarray`
"""
py_mixings = []
for tensors in self:
x, _, _, batch_index, label, y = tensors
outputs = self.model.inference(
x,
y,
batch_index=batch_index,
label=label,
n_samples=n_samples,
transform_batch=transform_batch,
)
py_mixing = torch.sigmoid(outputs["py_"]["mixing"])
py_mixings += [py_mixing.cpu()]
if n_samples > 1:
# concatenate along batch dimension -> result shape = (samples, cells, features)
py_mixings = torch.cat(py_mixings, dim=1)
# (cells, features, samples)
py_mixings = py_mixings.permute(1, 2, 0)
else:
py_mixings = torch.cat(py_mixings, dim=0)
if give_mean is True and n_samples > 1:
py_mixings = torch.mean(py_mixings, dim=-1)
py_mixings = py_mixings.cpu().numpy()
return py_mixings
@torch.no_grad()
def get_sample_scale(
self,
transform_batch=None,
eps=0.5,
normalize_pro=False,
sample_bern=True,
include_bg=False,
):
"""Helper function to provide normalized expression for DE testing.
For normalized, denoised expression, please use
`get_normalized_denoised_expression()`
:param transform_batch: Int of batch to "transform" all cells into
:param eps: Prior count to add to protein normalized expression
:param normalize_pro: bool, whether to make protein expression sum to one in a cell
:param sample_bern: bool, whether to sample the protein background/foreground mixing Bernoulli
:param include_bg: bool, whether to include the background component of expression
:rtype: :py:class:`np.ndarray`
"""
scales = []
for tensors in self:
x, _, _, batch_index, label, y = tensors
model_scale = self.model.get_sample_scale(
x,
y,
batch_index=batch_index,
label=label,
n_samples=1,
transform_batch=transform_batch,
eps=eps,
normalize_pro=normalize_pro,
sample_bern=sample_bern,
include_bg=include_bg,
)
# prior count for proteins
scales += [torch.cat(model_scale, dim=-1).cpu().numpy()]
return np.concatenate(scales)
@torch.no_grad()
def get_normalized_denoised_expression(
self,
n_samples: int = 1,
give_mean: bool = True,
transform_batch: Optional[Union[int, List[int]]] = None,
sample_protein_mixing: bool = True,
):
"""Returns the tensors of denoised normalized gene and protein expression
:param n_samples: number of samples from posterior distribution
:param sample_protein_mixing: Sample mixing bernoulli, setting background to zero
:param give_mean: bool, whether to return samples along first axis or average over samples
:param transform_batch: Batches to condition on.
If transform_batch is:
- None, then real observed batch is used
- int, then batch transform_batch is used
- list of int, then values are averaged over provided batches.
:return: Denoised genes, denoised proteins
:rtype: 2-tuple of :py:class:`np.ndarray`
"""
scale_list_gene = []
scale_list_pro = []
if (transform_batch is None) or (isinstance(transform_batch, int)):
transform_batch = [transform_batch]
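# A single batch id (or None) is wrapped in a list so the scales can be averaged
# uniformly over all requested batches below.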
for tensors in self:
x, _, _, batch_index, label, y = tensors
px_scale = torch.zeros_like(x)
py_scale = torch.zeros_like(y)
if n_samples > 1:
px_scale = torch.stack(n_samples * [px_scale])
py_scale = torch.stack(n_samples * [py_scale])
for b in transform_batch:
outputs = self.model.inference(
x,
y,
batch_index=batch_index,
label=label,
n_samples=n_samples,
transform_batch=b,
)
px_scale += outputs["px_"]["scale"]
py_ = outputs["py_"]
# probability of background
protein_mixing = 1 / (1 + torch.exp(-py_["mixing"]))
if sample_protein_mixing is True:
protein_mixing = Bernoulli(protein_mixing).sample()
py_scale += py_["rate_fore"] * (1 - protein_mixing)
px_scale /= len(transform_batch)
py_scale /= len(transform_batch)
scale_list_gene.append(px_scale.cpu())
scale_list_pro.append(py_scale.cpu())
if n_samples > 1:
# concatenate along batch dimension -> result shape = (samples, cells, features)
scale_list_gene = torch.cat(scale_list_gene, dim=1)
scale_list_pro = torch.cat(scale_list_pro, dim=1)
# (cells, features, samples)
scale_list_gene = scale_list_gene.permute(1, 2, 0)
scale_list_pro = scale_list_pro.permute(1, 2, 0)
else:
scale_list_gene = torch.cat(scale_list_gene, dim=0)
scale_list_pro = torch.cat(scale_list_pro, dim=0)
if give_mean is True and n_samples > 1:
scale_list_gene = torch.mean(scale_list_gene, dim=-1)
scale_list_pro = torch.mean(scale_list_pro, dim=-1)
scale_list_gene = scale_list_gene.cpu().numpy()
scale_list_pro = scale_list_pro.cpu().numpy()
return scale_list_gene, scale_list_pro
@torch.no_grad()
def get_protein_mean(
self,
n_samples: int = 1,
give_mean: bool = True,
transform_batch: Optional[Union[int, List[int]]] = None,
):
"""Returns the tensors of protein mean (with foreground and background)
:param n_samples: number of samples from posterior distribution
:param give_mean: bool, whether to return samples along first axis or average over samples
:param transform_batch: Batches to condition on.
If transform_batch is:
- None, then real observed batch is used
- int, then batch transform_batch is used
- list of int, then values are averaged over provided batches.
:rtype: :py:class:`np.ndarray`
"""
if (transform_batch is None) or (isinstance(transform_batch, int)):
transform_batch = [transform_batch]
rate_list_pro = []
for tensors in self:
x, _, _, batch_index, label, y = tensors
protein_rate = torch.zeros_like(y)
if n_samples > 1:
protein_rate = torch.stack(n_samples * [protein_rate])
for b in transform_batch:
outputs = self.model.inference(
x,
y,
batch_index=batch_index,
label=label,
n_samples=n_samples,
transform_batch=b,
)
py_ = outputs["py_"]
pi = 1 / (1 + torch.exp(-py_["mixing"]))
protein_rate += py_["rate_fore"] * (1 - pi) + py_["rate_back"] * pi
protein_rate /= len(transform_batch)
rate_list_pro.append(protein_rate.cpu())
if n_samples > 1:
# concatenate along batch dimension -> result shape = (samples, cells, features)
rate_list_pro = torch.cat(rate_list_pro, dim=1)
# (cells, features, samples)
rate_list_pro = rate_list_pro.permute(1, 2, 0)
else:
rate_list_pro = torch.cat(rate_list_pro, dim=0)
if give_mean is True and n_samples > 1:
rate_list_pro = torch.mean(rate_list_pro, dim=-1)
rate_list_pro = rate_list_pro.cpu().numpy()
return rate_list_pro
@torch.no_grad()
def generate_denoised_samples(
self,
n_samples: int = 25,
batch_size: int = 64,
rna_size_factor: int = 1,
transform_batch: Optional[int] = None,
):
""" Return samples from an adjusted posterior predictive. Proteins are concatenated to genes.
:param n_samples: How many samples per cell
:param batch_size: Mini-batch size for sampling. Lower means less GPU memory footprint
:param rna_size_factor: size factor for RNA prior to sampling gamma distribution
:param transform_batch: int of which batch to condition on for all cells
:return:
"""
posterior_list = []
for tensors in self.update({"batch_size": batch_size}):
x, _, _, batch_index, labels, y = tensors
with torch.no_grad():
outputs = self.model.inference(
x,
y,
batch_index=batch_index,
label=labels,
n_samples=n_samples,
transform_batch=transform_batch,
)
px_ = outputs["px_"]
py_ = outputs["py_"]
pi = 1 / (1 + torch.exp(-py_["mixing"]))
mixing_sample = Bernoulli(pi).sample()
protein_rate = py_["rate_fore"]
rate = torch.cat((rna_size_factor * px_["scale"], protein_rate), dim=-1)
if len(px_["r"].size()) == 2:
px_dispersion = px_["r"]
else:
px_dispersion = torch.ones_like(x) * px_["r"]
if len(py_["r"].size()) == 2:
py_dispersion = py_["r"]
else:
py_dispersion = torch.ones_like(y) * py_["r"]
dispersion = torch.cat((px_dispersion, py_dispersion), dim=-1)
# This gamma is really l*w using scVI manuscript notation
p = rate / (rate + dispersion)
r = dispersion
l_train = Gamma(r, (1 - p) / p).sample()
data = l_train.cpu().numpy()
# make background 0
data[:, :, self.gene_dataset.nb_genes :] = (
data[:, :, self.gene_dataset.nb_genes :]
* (1 - mixing_sample).cpu().numpy()
)
posterior_list += [data]
posterior_list[-1] = np.transpose(posterior_list[-1], (1, 2, 0))
return np.concatenate(posterior_list, axis=0)
@torch.no_grad()
def generate_feature_correlation_matrix(
self,
n_samples: int = 25,
batch_size: int = 64,
rna_size_factor: int = 1000,
transform_batch: Optional[Union[int, List[int]]] = None,
correlation_mode: str = "spearman",
):
""" Wrapper of `generate_denoised_samples()` to create a gene-protein gene-protein corr matrix
:param n_samples: How may samples per cell
:param batch_size: Mini-batch size for sampling. Lower means less GPU memory footprint
:rna_size_factor: size factor for RNA prior to sampling gamma distribution
:param transform_batch: Batches to condition on.
If transform_batch is:
- None, then real observed batch is used
- int, then batch transform_batch is used
- list of int, then values are averaged over provided batches.
:return:
"""
if (transform_batch is None) or (isinstance(transform_batch, int)):
transform_batch = [transform_batch]
corr_mats = []
for b in transform_batch:
denoised_data = self.generate_denoised_samples(
n_samples=n_samples,
batch_size=batch_size,
rna_size_factor=rna_size_factor,
transform_batch=b,
)
flattened = np.zeros(
(denoised_data.shape[0] * n_samples, denoised_data.shape[1])
)
for i in range(n_samples):
flattened[
denoised_data.shape[0] * (i) : denoised_data.shape[0] * (i + 1)
] = denoised_data[:, :, i]
if correlation_mode == "pearson":
corr_matrix = np.corrcoef(flattened, rowvar=False)
else:
corr_matrix = spearmanr(flattened, axis=0)[0]
corr_mats.append(corr_matrix)
corr_matrix = np.mean(np.stack(corr_mats), axis=0)
return corr_matrix
@torch.no_grad()
def imputation(self, n_samples: int = 1):
""" Gene imputation
"""
imputed_list = []
for tensors in self:
x, _, _, batch_index, label, y = tensors
px_rate = self.model.get_sample_rate(
x, y, batch_index=batch_index, label=label, n_samples=n_samples
)
imputed_list += [np.array(px_rate.cpu())]
imputed_list = np.concatenate(imputed_list)
return imputed_list.squeeze()
@torch.no_grad()
def imputation_list(self, n_samples: int = 1):
""" This code is identical to same function in posterior.py
Except, we use the totalVI definition of `model.get_sample_rate`
"""
original_list = []
imputed_list = []
batch_size = self.data_loader_kwargs["batch_size"] // n_samples
for tensors, corrupted_tensors in zip(
self.uncorrupted().sequential(batch_size=batch_size),
self.corrupted().sequential(batch_size=batch_size),
):
batch = tensors[0]
actual_batch_size = batch.size(0)
dropout_x, _, _, batch_index, labels, y = corrupted_tensors
px_rate = self.model.get_sample_rate(
dropout_x, y, batch_index=batch_index, label=labels, n_samples=n_samples
)
px_rate = px_rate[:, : self.gene_dataset.nb_genes]
indices_dropout = torch.nonzero(batch - dropout_x)
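# Only the entries zeroed out by the corruption (where `batch` and `dropout_x` differ) are scored for imputation.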
if indices_dropout.size() != torch.Size([0]):
i = indices_dropout[:, 0]
j = indices_dropout[:, 1]
batch = batch.unsqueeze(0).expand(
(n_samples, batch.size(0), batch.size(1))
)
original = np.array(batch[:, i, j].view(-1).cpu())
imputed = np.array(px_rate[..., i, j].view(-1).cpu())
cells_index = np.tile(np.array(i.cpu()), n_samples)
original_list += [
original[cells_index == i] for i in range(actual_batch_size)
]
imputed_list += [
imputed[cells_index == i] for i in range(actual_batch_size)
]
else:
original_list = np.array([])
imputed_list = np.array([])
return original_list, imputed_list
@torch.no_grad()
def differential_expression_score(
self,
idx1: Union[List[bool], np.ndarray],
idx2: Union[List[bool], np.ndarray],
mode: Optional[str] = "vanilla",
batchid1: Optional[Union[List[int], np.ndarray]] = None,
batchid2: Optional[Union[List[int], np.ndarray]] = None,
use_observed_batches: Optional[bool] = False,
n_samples: int = 5000,
use_permutation: bool = True,
M_permutation: int = 10000,
all_stats: bool = True,
change_fn: Optional[Union[str, Callable]] = None,
m1_domain_fn: Optional[Callable] = None,
delta: Optional[float] = 0.5,
**kwargs,
) -> pd.DataFrame:
r"""
Unified method for differential expression inference.
This function is an extension of the `get_bayes_factors` method
providing additional genes information to the user
# FUNCTIONING
Two modes coexist:
- the "vanilla" mode follows protocol described in arXiv:1709.02082
In this case, we perform hypothesis testing based on:
M_1: h_1 > h_2
M_2: h_1 <= h_2
DE can then be based on the study of the Bayes factors:
log (p(M_1 | x_1, x_2) / p(M_2 | x_1, x_2))
- the "change" mode (described in bioRxiv, 794289)
consists in estimating an effect size random variable (e.g., log fold-change) and
performing Bayesian hypothesis testing on this variable.
The `change_fn` function computes the effect size variable r based on two inputs
corresponding to the normalized means in both populations
Hypotheses:
M_1: r \in R_0 (effect size r in region inducing differential expression)
M_2: r not \in R_0 (no differential expression)
To characterize the region R_0, the user has two choices.
1. A common case is when the region [-delta, delta] does not induce differential
expression.
If the user specifies a threshold delta,
we suppose that R_0 = \mathbb{R} \ [-delta, delta]
2. Specify a specific indicator function f: \mathbb{R} -> {0, 1} s.t.
r \in R_0 iff f(r) = 1
Decision-making can then be based on the estimates of
p(M_1 | x_1, x_2)
# POSTERIOR SAMPLING
Both modes require sampling from the posteriors of the normalized means
To that purpose we sample the Posterior in the following way:
1. The posterior is sampled n_samples times for each subpopulation
2. For computational efficiency (posterior sampling is quite expensive), instead of
comparing the obtained samples element-wise, we can permute posterior samples.
Remember that computing the Bayes Factor requires sampling
q(z_A | x_A) and q(z_B | x_B)
# BATCH HANDLING
Currently, the code covers several batch handling configurations:
1. If `use_observed_batches`=True, then batches are considered as observations
and cells' normalized means are conditioned on real batch observations
2. If case (cell group 1) and control (cell group 2) are conditioned on the same
batch ids.
set(batchid1) = set(batchid2):
e.g. batchid1 = batchid2 = None
3. If case and control are conditioned on different batch ids that do not intersect
i.e., set(batchid1) != set(batchid2)
and intersection(set(batchid1), set(batchid2)) = \emptyset
This function does not cover other cases yet and will warn users in such cases.
# PARAMETERS
# Mode parameters
:param mode: one of ["vanilla", "change"]
## Genes/cells/batches selection parameters
:param idx1: bool array masking subpopulation cells 1. Should be True where cell is
from associated population
:param idx2: bool array masking subpopulation cells 2. Should be True where cell is
from associated population
:param batchid1: List of batch ids for which you want to perform DE Analysis for
subpopulation 1. By default, all ids are taken into account
:param batchid2: List of batch ids for which you want to perform DE Analysis for
subpopulation 2. By default, all ids are taken into account
:param use_observed_batches: Whether normalized means are conditioned on observed
batches
## Sampling parameters
:param n_samples: Number of posterior samples
:param use_permutation: Activates step 2 described above.
Simply formulated, pairs obtained from posterior sampling (when calling
`sample_scale_from_batch`) will be randomly permuted so that the number of
pairs used to compute Bayes Factors becomes M_permutation.
:param M_permutation: Number of times we will "mix" posterior samples in step 2.
Only makes sense when use_permutation=True
:param change_fn: function computing effect size based on both normalized means
:param m1_domain_fn: custom indicator function of effect size regions
inducing differential expression
:param delta: specific case of region inducing differential expression.
In this case, we suppose that [-delta, delta] does not induce differential expression,
i.e., R_0 = \mathbb{R} \ [-delta, delta] (LFC case)
:param all_stats: whether additional metrics should be provided
:\**kwargs: Other keywords arguments for `get_sample_scale()`
:return: Differential expression properties
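A minimal usage sketch (hypothetical cell masks; assumes ``post`` is a trained ``TotalPosterior``)::
>>> labels = post.gene_dataset.labels.ravel()
>>> idx1 = labels == 0  # cells in the first (hypothetical) group
>>> idx2 = labels == 1  # cells in the second group
>>> de_res = post.differential_expression_score(idx1, idx2, mode="change", n_samples=100)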
"""
all_info = self.get_bayes_factors(
idx1=idx1,
idx2=idx2,
mode=mode,
batchid1=batchid1,
batchid2=batchid2,
use_observed_batches=use_observed_batches,
n_samples=n_samples,
use_permutation=use_permutation,
M_permutation=M_permutation,
change_fn=change_fn,
m1_domain_fn=m1_domain_fn,
delta=delta,
**kwargs,
)
col_names = np.concatenate(
[self.gene_dataset.gene_names, self.gene_dataset.protein_names]
)
if all_stats is True:
nan = np.array([np.nan] * len(self.gene_dataset.protein_names))
(
mean1,
mean2,
nonz1,
nonz2,
norm_mean1,
norm_mean2,
) = self.gene_dataset.raw_counts_properties(idx1, idx2)
mean1_pro = self.gene_dataset.protein_expression[idx1, :].mean(0)
mean2_pro = self.gene_dataset.protein_expression[idx2, :].mean(0)
nonz1_pro = (self.gene_dataset.protein_expression[idx1, :] > 0).mean(0)
nonz2_pro = (self.gene_dataset.protein_expression[idx2, :] > 0).mean(0)
# TODO implement properties for proteins
genes_properties_dict = dict(
raw_mean1=np.concatenate([mean1, mean1_pro]),
raw_mean2=np.concatenate([mean2, mean2_pro]),
non_zeros_proportion1=np.concatenate([nonz1, nonz1_pro]),
non_zeros_proportion2=np.concatenate([nonz2, nonz2_pro]),
raw_normalized_mean1=np.concatenate([norm_mean1, nan]),
raw_normalized_mean2=np.concatenate([norm_mean2, nan]),
)
all_info = {**all_info, **genes_properties_dict}
res = pd.DataFrame(all_info, index=col_names)
sort_key = "proba_de" if mode == "change" else "bayes_factor"
res = res.sort_values(by=sort_key, ascending=False)
return res
@torch.no_grad()
def generate_parameters(self):
raise NotImplementedError
default_early_stopping_kwargs = {
"early_stopping_metric": "elbo",
"save_best_state_metric": "elbo",
"patience": 45,
"threshold": 0,
"reduce_lr_on_plateau": True,
"lr_patience": 30,
"lr_factor": 0.6,
"posterior_class": TotalPosterior,
}
class TotalTrainer(UnsupervisedTrainer):
r"""The VariationalInference class for the unsupervised training of an autoencoder.
Args:
:model: A model instance from class ``TOTALVI``
:gene_dataset: A gene_dataset instance like ``CbmcDataset()`` with attribute ``protein_expression``
:train_size: The train size, either a float between 0 and 1 or an integer for the number of training samples
to use. Default: ``0.90``.
:test_size: The test size, either a float between 0 and 1 or an integer for the number of test samples
to use. Default: ``0.10``. Note that if train and test do not add to 1, the remainder is placed in a validation set
:pro_recons_weight: Scaling factor on the reconstruction loss for proteins. Default: ``1.0``.
:n_epochs_kl_warmup: Number of epochs for annealing the KL terms for `z` and `mu` of the ELBO (from 0 to 1). If None, no warmup performed, unless
`n_iter_kl_warmup` is set.
:n_iter_kl_warmup: Number of minibatches for annealing the KL terms for `z` and `mu` of the ELBO (from 0 to 1). If set to "auto",
the number of iterations is equal to 75% of the number of cells. `n_epochs_kl_warmup` takes precedence if it is not None. If both are None,
then no warmup is performed.
:\*\*kwargs: Other keywords arguments from the general Trainer class.
"""
default_metrics_to_monitor = ["elbo"]
def __init__(
self,
model,
dataset,
train_size=0.90,
test_size=0.10,
pro_recons_weight=1.0,
n_epochs_kl_warmup=None,
n_iter_kl_warmup="auto",
early_stopping_kwargs=default_early_stopping_kwargs,
discriminator=None,
use_adversarial_loss=False,
kappa=None,
**kwargs,
):
self.n_genes = dataset.nb_genes
self.n_proteins = model.n_input_proteins
self.use_adversarial_loss = use_adversarial_loss
self.kappa = kappa
self.pro_recons_weight = pro_recons_weight
super().__init__(
model,
dataset,
n_epochs_kl_warmup=n_epochs_kl_warmup,
n_iter_kl_warmup=0.75 * len(dataset)
if n_iter_kl_warmup == "auto"
else n_iter_kl_warmup,
early_stopping_kwargs=early_stopping_kwargs,
**kwargs,
)
if use_adversarial_loss is True and discriminator is None:
discriminator = Classifier(
n_input=self.model.n_latent,
n_hidden=32,
n_labels=self.gene_dataset.n_batches,
n_layers=2,
logits=True,
)
self.discriminator = discriminator
if self.use_cuda and self.discriminator is not None:
self.discriminator.cuda()
if type(self) is TotalTrainer:
(
self.train_set,
self.test_set,
self.validation_set,
) = self.train_test_validation(
model, dataset, train_size, test_size, type_class=TotalPosterior
)
self.train_set.to_monitor = []
self.test_set.to_monitor = ["elbo"]
self.validation_set.to_monitor = ["elbo"]
def loss(self, tensors):
(
sample_batch_X,
local_l_mean,
local_l_var,
batch_index,
label,
sample_batch_Y,
) = tensors
(
reconst_loss_gene,
reconst_loss_protein,
kl_div_z,
kl_div_l_gene,
kl_div_back_pro,
) = self.model(
sample_batch_X,
sample_batch_Y,
local_l_mean,
local_l_var,
batch_index,
label,
)
loss = torch.mean(
reconst_loss_gene
+ self.pro_recons_weight * reconst_loss_protein
+ self.kl_weight * kl_div_z
+ kl_div_l_gene
+ self.kl_weight * kl_div_back_pro
)
return loss
def loss_discriminator(
self, z, batch_index, predict_true_class=True, return_details=True
):
n_classes = self.gene_dataset.n_batches
cls_logits = torch.nn.LogSoftmax(dim=1)(self.discriminator(z))
if predict_true_class:
cls_target = one_hot(batch_index, n_classes)
else:
one_hot_batch = one_hot(batch_index, n_classes)
cls_target = torch.zeros_like(one_hot_batch)
# keep zero at the true class and spread mass 1 / (n_classes - 1) over the remaining classes
cls_target.masked_scatter_(
~one_hot_batch.bool(), torch.ones_like(one_hot_batch) / (n_classes - 1)
)
l_soft = cls_logits * cls_target
loss = -l_soft.sum(dim=1).mean()
return loss
def _get_z(self, tensors):
(
sample_batch_X,
local_l_mean,
local_l_var,
batch_index,
label,
sample_batch_Y,
) = tensors
z = self.model.sample_from_posterior_z(
sample_batch_X, sample_batch_Y, batch_index, give_mean=False
)
return z
def train(self, n_epochs=500, lr=4e-3, eps=0.01, params=None):
super().train(n_epochs=n_epochs, lr=lr, eps=eps, params=params)
def on_training_loop(self, tensors_list):
if self.use_adversarial_loss:
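# With kappa=None, the adversarial weight defaults to 1 - kl_weight, so it anneals
# from 1 to 0 as the KL weight warms up.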
if self.kappa is None:
kappa = 1 - self.kl_weight
else:
kappa = self.kappa
batch_index = tensors_list[0][3]
if kappa > 0:
z = self._get_z(*tensors_list)
# Train discriminator
d_loss = self.loss_discriminator(z.detach(), batch_index, True)
d_loss *= kappa
self.d_optimizer.zero_grad()
d_loss.backward()
self.d_optimizer.step()
# Train generative model to fool discriminator
fool_loss = self.loss_discriminator(z, batch_index, False)
fool_loss *= kappa
# Train generative model
self.optimizer.zero_grad()
self.current_loss = loss = self.loss(*tensors_list)
if kappa > 0:
(loss + fool_loss).backward()
else:
loss.backward()
self.optimizer.step()
else:
self.current_loss = loss = self.loss(*tensors_list)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
def training_extras_init(self, lr_d=1e-3, eps=0.01):
if self.discriminator is not None:
self.discriminator.train()
d_params = filter(
lambda p: p.requires_grad, self.discriminator.parameters()
)
self.d_optimizer = torch.optim.Adam(d_params, lr=lr_d, eps=eps)
def training_extras_end(self):
if self.discriminator is not None:
self.discriminator.eval()
| [
"torch.cat",
"torch.stack",
"torch.distributions.Bernoulli",
"torch.exp",
"torch.sum",
"torch.sigmoid",
"torch.Size",
"torch.utils.data.DataLoader",
"torch.zeros_like",
"torch.nonzero",
"torch.nn.LogSoftmax",
"torch.distributions.Gamma",
"torch.no_grad",
"torch.optim.Adam",
"torch.distributions.Normal",
"torch.logsumexp",
"torch.ones_like",
"torch.distributions.Poisson",
"torch.mean"
] | 1.0.1 | chenlingantelope/scVI_TSP | c89c35002205b1169a740da06ec691a3d4f4d405 |
1.7 | from functools import partial
import numpy as np
import torch
import torch.nn as nn
from typing import Callable, Union
from alibi_detect.utils.prediction import tokenize_transformer
def predict_batch(x: Union[list, np.ndarray, torch.Tensor], model: Union[Callable, nn.Module, nn.Sequential],
device: torch.device = None, batch_size: int = int(1e10), preprocess_fn: Callable = None,
dtype: Union[np.dtype, torch.dtype] = np.float32) -> Union[np.ndarray, torch.Tensor, tuple]:
"""
Make batch predictions on a model.
Parameters
----------
x
Batch of instances.
model
PyTorch model.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either torch.device('cuda') or torch.device('cpu').
batch_size
Batch size used during prediction.
preprocess_fn
Optional preprocessing function for each batch.
dtype
Model output type, e.g. np.float32 or torch.float32.
Returns
-------
Numpy array, torch tensor or tuples of those with model outputs.
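Examples
--------
A minimal sketch with a toy linear model and random data (purely illustrative):
>>> import numpy as np
>>> import torch.nn as nn
>>> model = nn.Linear(4, 2)
>>> x = np.random.randn(8, 4).astype(np.float32)
>>> preds = predict_batch(x, model, batch_size=4)  # np.ndarray of shape (8, 2)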
"""
if device is None:
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if isinstance(x, np.ndarray):
x = torch.from_numpy(x)
n = len(x)
n_minibatch = int(np.ceil(n / batch_size))
return_np = not isinstance(dtype, torch.dtype)
return_list = False
preds = [] # type: Union[list, tuple]
with torch.no_grad():
for i in range(n_minibatch):
istart, istop = i * batch_size, min((i + 1) * batch_size, n)
x_batch = x[istart:istop]
if isinstance(preprocess_fn, Callable): # type: ignore
x_batch = preprocess_fn(x_batch)
preds_tmp = model(x_batch.to(device)) # type: ignore
if isinstance(preds_tmp, (list, tuple)):
if len(preds) == 0: # init tuple with lists to store predictions
preds = tuple([] for _ in range(len(preds_tmp)))
return_list = isinstance(preds_tmp, list)
for j, p in enumerate(preds_tmp):
if device.type == 'cuda' and isinstance(p, torch.Tensor):
p = p.cpu()
preds[j].append(p if not return_np or isinstance(p, np.ndarray) else p.numpy())
elif isinstance(preds_tmp, (np.ndarray, torch.Tensor)):
if device.type == 'cuda' and isinstance(preds_tmp, torch.Tensor):
preds_tmp = preds_tmp.cpu()
preds.append(preds_tmp if not return_np or isinstance(preds_tmp, np.ndarray) # type: ignore
else preds_tmp.numpy())
else:
raise TypeError(f'Model output type {type(preds_tmp)} not supported. The model output '
f'type needs to be one of list, tuple, np.ndarray or torch.Tensor.')
concat = partial(np.concatenate, axis=0) if return_np else partial(torch.cat, dim=0)
out = tuple(concat(p) for p in preds) if isinstance(preds, tuple) else concat(preds)
if return_list:
out = list(out)
return out
def predict_batch_transformer(x: Union[list, np.ndarray], model: Union[nn.Module, nn.Sequential],
tokenizer: Callable, max_len: int, device: torch.device = None,
                              batch_size: int = int(1e10), dtype: Union[np.dtype, torch.dtype] = np.float32) \
-> Union[np.ndarray, torch.Tensor]:
"""
Make batch predictions using a transformers tokenizer and model.
Parameters
----------
x
Batch of instances.
model
PyTorch model.
tokenizer
Tokenizer for model.
max_len
Max sequence length for tokens.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either torch.device('cuda') or torch.device('cpu').
batch_size
Batch size used during prediction.
dtype
Model output type, e.g. np.float32 or torch.float32.
Returns
-------
Numpy array or torch tensor with model outputs.
"""
preprocess_fn = partial(tokenize_transformer, tokenizer=tokenizer, max_len=max_len, backend='pt')
return predict_batch(x, model, device=device, preprocess_fn=preprocess_fn, batch_size=batch_size, dtype=dtype)
| [
"torch.cuda.is_available",
"torch.no_grad",
"torch.from_numpy"
] | 1.7.0 | arnaudvl/alibi-detect | 573ef3be3435c834489a7b4f2d23e580c8a0a2a2 |
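A minimal usage sketch for the predict_batch helper above, assuming the function is in scope (for example, pasted into the same module); the model, shapes and batch size are illustrative only, and the device is pinned to CPU so the toy model and the data live in the same place.
import numpy as np
import torch
import torch.nn as nn
model = nn.Sequential(nn.Linear(8, 4), nn.ReLU(), nn.Linear(4, 2)).eval()
x = np.random.randn(100, 8).astype(np.float32)
cpu = torch.device('cpu')
preds_np = predict_batch(x, model, device=cpu, batch_size=32)                       # numpy output (default dtype)
preds_pt = predict_batch(x, model, device=cpu, batch_size=32, dtype=torch.float32)  # torch.Tensor output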
1.5 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Backbone modules.
"""
from collections import OrderedDict
import torch
import torch.nn.functional as F
import torchvision
from torch import nn
from torchvision.models._utils import IntermediateLayerGetter
from typing import Dict, List
from util.misc import NestedTensor, is_main_process
from .position_encoding import build_position_encoding
from .se_net import senet50_256
class FrozenBatchNorm2d(torch.nn.Module):
"""
BatchNorm2d where the batch statistics and the affine parameters are fixed.
    Copy-paste from torchvision.misc.ops with added eps before rsqrt,
without which any other models than torchvision.models.resnet[18,34,50,101]
produce nans.
"""
def __init__(self, n):
super(FrozenBatchNorm2d, self).__init__()
self.register_buffer("weight", torch.ones(n))
self.register_buffer("bias", torch.zeros(n))
self.register_buffer("running_mean", torch.zeros(n))
self.register_buffer("running_var", torch.ones(n))
def _load_from_state_dict(
self,
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
num_batches_tracked_key = prefix + "num_batches_tracked"
if num_batches_tracked_key in state_dict:
del state_dict[num_batches_tracked_key]
super(FrozenBatchNorm2d, self)._load_from_state_dict(
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
)
def forward(self, x):
# move reshapes to the beginning
# to make it fuser-friendly
w = self.weight.reshape(1, -1, 1, 1)
b = self.bias.reshape(1, -1, 1, 1)
rv = self.running_var.reshape(1, -1, 1, 1)
rm = self.running_mean.reshape(1, -1, 1, 1)
eps = 1e-5
scale = w * (rv + eps).rsqrt()
bias = b - rm * scale
return x * scale + bias
class BackboneBase(nn.Module):
def __init__(
self,
backbone: nn.Module,
train_backbone: bool,
num_channels: int,
return_interm_layers: bool,
opt_name=None,
):
super().__init__()
for name, parameter in backbone.named_parameters():
if (
not train_backbone
or "layer2" not in name
and "layer3" not in name
and "layer4" not in name
):
parameter.requires_grad_(False)
if return_interm_layers:
return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"}
else:
if opt_name is None:
return_layers = {"layer4": "0"}
else:
return_layers = None
self.body = backbone
if return_layers is not None:
self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
self.num_channels = num_channels
def forward(self, tensor_list: NestedTensor):
xs = self.body(tensor_list.tensors)
out: Dict[str, NestedTensor] = {}
for name, x in xs.items():
m = tensor_list.mask
assert m is not None
mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]
out[name] = NestedTensor(x, mask)
return out
class Backbone(BackboneBase):
"""ResNet backbone with frozen BatchNorm."""
def __init__(
self,
name: str,
train_backbone: bool,
return_interm_layers: bool,
dilation: bool,
):
if name not in ["senet256"]:
optname = None
backbone = getattr(torchvision.models, name)(
replace_stride_with_dilation=[False, False, dilation],
pretrained=is_main_process(),
norm_layer=FrozenBatchNorm2d,
)
num_channels = 512 if name in ("resnet18", "resnet34") else 2048
else:
optname = "feat_extract"
backbone = senet50_256(pretrained=is_main_process())
num_channels = 2048
super().__init__(
backbone, train_backbone, num_channels, return_interm_layers, optname
)
class Joiner(nn.Sequential):
def __init__(self, backbone, position_embedding):
super().__init__(backbone, position_embedding)
def forward(self, tensor_list: NestedTensor, targets=None):
xs = self[0](tensor_list)
out: List[NestedTensor] = []
pos = []
for name, x in xs.items():
out.append(x)
# position encoding
pos.append(self[1](x, targets).to(x.tensors.dtype))
return out, pos
def build_backbone(args):
position_embedding = build_position_encoding(args)
train_backbone = args.lr_backbone > 0
return_interm_layers = args.masks
backbone = Backbone(
args.backbone, train_backbone, return_interm_layers, args.dilation
)
model = Joiner(backbone, position_embedding)
model.num_channels = backbone.num_channels
return model
| [
"torch.zeros",
"torch.ones"
] | 1.5.0 | bjuncek/detr | a1bd3788ca16fb8dc92f7e69b2d801259ecec8f9 |
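As a sanity check on the arithmetic in FrozenBatchNorm2d.forward above: a freshly constructed instance (unit weight and variance, zero bias and mean, eps=1e-5) should match an eval-mode torch.nn.BatchNorm2d with default settings. A small sketch, assuming the class is in scope:
import torch
import torch.nn as nn
frozen = FrozenBatchNorm2d(16)
bn = nn.BatchNorm2d(16).eval()
x = torch.randn(2, 16, 8, 8)
with torch.no_grad():
    print(torch.allclose(frozen(x), bn(x), atol=1e-6))  # expected: True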
2.5 | from copy import copy
from typing import List
import numpy as np
import torch
import torch.optim as optim
import torchvision.transforms as T
import yaml
from matplotlib.pyplot import fill
import wandb
from attacks.initial_selection import find_initial_w
from utils.wandb import load_model
class AttackConfigParser:
def __init__(self, config_file):
with open(config_file, 'r') as file:
config = yaml.safe_load(file)
self._config = config
def create_target_model(self):
model = load_model(self._config['wandb_target_run'])
model.eval()
self.model = model
return model
def get_target_dataset(self):
api = wandb.Api(timeout=60)
run = api.run(self._config['wandb_target_run'])
return run.config['Dataset'].strip().lower()
def create_evaluation_model(self):
evaluation_model = load_model(self._config['wandb_evaluation_run'])
evaluation_model.eval()
self.evaluation_model = evaluation_model
return evaluation_model
def create_optimizer(self, params, config=None):
if config is None:
config = self._config['attack']['optimizer']
        optimizer_config = config
for optimizer_type, args in optimizer_config.items():
if not hasattr(optim, optimizer_type):
raise Exception(
f'{optimizer_type} is no valid optimizer. Please write the type exactly as the PyTorch class'
)
optimizer_class = getattr(optim, optimizer_type)
optimizer = optimizer_class(params, **args)
break
return optimizer
def create_lr_scheduler(self, optimizer):
if not 'lr_scheduler' in self._config['attack']:
return None
scheduler_config = self._config['attack']['lr_scheduler']
for scheduler_type, args in scheduler_config.items():
if not hasattr(optim.lr_scheduler, scheduler_type):
raise Exception(
f'{scheduler_type} is no valid learning rate scheduler. Please write the type exactly as the PyTorch class.'
)
scheduler_class = getattr(optim.lr_scheduler, scheduler_type)
scheduler_instance = scheduler_class(optimizer, **args)
break
return scheduler_instance
def create_candidates(self, generator, target_model, targets):
candidate_config = self._config['candidates']
device = 'cuda' if torch.cuda.is_available() else 'cpu'
if 'candidate_file' in candidate_config:
candidate_file = candidate_config['candidate_file']
w = torch.load(candidate_file)
            w = w[:candidate_config['num_candidates']]
w = w.to(device)
print(f'Loaded {w.shape[0]} candidates from {candidate_file}.')
return w
elif 'candidate_search' in candidate_config:
search_config = candidate_config['candidate_search']
w = find_initial_w(generator=generator,
target_model=target_model,
targets=targets,
seed=self.seed,
**search_config)
print(f'Created {w.shape[0]} candidates randomly in w space.')
else:
raise Exception(f'No valid candidate initialization stated.')
w = w.to(device)
return w
def create_target_vector(self):
device = 'cuda' if torch.cuda.is_available() else 'cpu'
attack_config = self._config['attack']
targets = None
target_classes = attack_config['targets']
num_candidates = self._config['candidates']['num_candidates']
if type(target_classes) is list:
targets = torch.tensor(target_classes)
targets = torch.repeat_interleave(targets, num_candidates)
elif target_classes == 'all':
targets = torch.tensor([i for i in range(self.model.num_classes)])
targets = torch.repeat_interleave(targets, num_candidates)
elif type(target_classes) == int:
targets = torch.full(size=(num_candidates, ),
fill_value=target_classes)
else:
raise Exception(
f' Please specify a target class or state a target vector.')
targets = targets.to(device)
return targets
def create_wandb_config(self):
for _, args in self.optimizer.items():
lr = args['lr']
break
config = {
**self.attack, 'optimizer': self.optimizer,
'lr': lr,
'use_scheduler': 'lr_scheduler' in self._config,
'target_architecture': self.model.architecture,
'target_extended': self.model.wandb_name,
'selection_method': self.final_selection['approach'],
'final_samples': self.final_selection['samples_per_target']
}
if 'lr_scheduler' in self._config:
config['lr_scheduler'] = self.lr_scheduler
return config
def create_attack_transformations(self):
transformation_list = []
if 'transformations' in self._config['attack']:
transformations = self._config['attack']['transformations']
for transform, args in transformations.items():
if not hasattr(T, transform):
raise Exception(
f'{transform} is no valid transformation. Please write the type exactly as the Torchvision class'
)
transformation_class = getattr(T, transform)
transformation_list.append(transformation_class(**args))
if len(transformation_list) > 0:
attack_transformations = T.Compose(transformation_list)
return attack_transformations
return None
@property
def candidates(self):
return self._config['candidates']
@property
def wandb_target_run(self):
return self._config['wandb_target_run']
@property
def logging(self):
return self._config['wandb']['enable_logging']
@property
def wandb_init_args(self):
return self._config['wandb']['wandb_init_args']
@property
def attack(self):
return self._config['attack']
@property
def wandb(self):
return self._config['wandb']
@property
def optimizer(self):
return self._config['attack']['optimizer']
@property
def lr_scheduler(self):
return self._config['attack']['lr_scheduler']
@property
def final_selection(self):
if 'final_selection' in self._config:
return self._config['final_selection']
else:
return None
@property
def stylegan_model(self):
return self._config['stylegan_model']
@property
def seed(self):
return self._config['seed']
@property
def cas_evaluation(self):
return self._config['cas_evaluation']
@property
def dataset(self):
return self._config['dataset']
@property
def fid_evaluation(self):
return self._config['fid_evaluation']
@property
def attack_center_crop(self):
if 'transformations' in self._config['attack']:
if 'CenterCrop' in self._config['attack']['transformations']:
return self._config['attack']['transformations']['CenterCrop']['size']
else:
return None
@property
def attack_resize(self):
if 'transformations' in self._config['attack']:
if 'Resize' in self._config['attack']['transformations']:
return self._config['attack']['transformations']['Resize']['size']
else:
return None
@property
def num_classes(self):
targets = self._config['attack']['targets']
if isinstance(targets, int):
return 1
else:
return len(targets)
@property
def log_progress(self):
if 'log_progress' in self._config['attack']:
return self._config['attack']['log_progress']
else:
return True
| [
"torch.repeat_interleave",
"torch.full",
"torch.cuda.is_available",
"torch.tensor",
"torch.load"
] | 2.5.2 | LukasStruppek/Plug-and-Play-Attacks | f433f97531a5fb3e6f82965ecdde504e0eb1c4ab |
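The parser above looks up optimizers and schedulers by their exact PyTorch class names. A hypothetical config fragment and the lookup it drives (all values are made up; only the key layout follows create_optimizer and create_lr_scheduler above):
import yaml
import torch
import torch.optim as optim
cfg = yaml.safe_load('''
attack:
  optimizer:
    Adam:
      lr: 0.005
      betas: [0.9, 0.999]
  lr_scheduler:
    MultiStepLR:
      milestones: [30, 60]
      gamma: 0.1
''')
opt_name, opt_args = next(iter(cfg['attack']['optimizer'].items()))
optimizer = getattr(optim, opt_name)([torch.zeros(3, requires_grad=True)], **opt_args)
sched_name, sched_args = next(iter(cfg['attack']['lr_scheduler'].items()))
scheduler = getattr(optim.lr_scheduler, sched_name)(optimizer, **sched_args)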
1.3 | import argparse
import torch
import pyro
import pyro.distributions as dist
from pyro.infer.mcmc.api import MCMC
from pyro.infer.mcmc import NUTS
"""
This simple example is intended to demonstrate how to use an LKJ prior with
a multivariate distribution.
It generates entirely random, uncorrelated data, and then attempts to fit a correlation matrix
and vector of variances.
"""
def model(y):
d = y.shape[1]
N = y.shape[0]
options = dict(dtype=y.dtype, device=y.device)
# Vector of variances for each of the d variables
theta = pyro.sample("theta", dist.HalfCauchy(torch.ones(d, **options)))
# Lower cholesky factor of a correlation matrix
eta = torch.ones(1, **options) # Implies a uniform distribution over correlation matrices
L_omega = pyro.sample("L_omega", dist.LKJCorrCholesky(d, eta))
# Lower cholesky factor of the covariance matrix
L_Omega = torch.mm(torch.diag(theta.sqrt()), L_omega)
# For inference with SVI, one might prefer to use torch.bmm(theta.sqrt().diag_embed(), L_omega)
# Vector of expectations
mu = torch.zeros(d, **options)
with pyro.plate("observations", N):
obs = pyro.sample("obs", dist.MultivariateNormal(mu, scale_tril=L_Omega), obs=y)
return obs
def main(args):
y = torch.randn(args.n, args.num_variables).to(dtype=torch.double)
if args.cuda:
y = y.cuda()
nuts_kernel = NUTS(model, jit_compile=False, step_size=1e-5)
MCMC(nuts_kernel, num_samples=args.num_samples,
warmup_steps=args.warmup_steps, num_chains=args.num_chains).run(y)
if __name__ == "__main__":
assert pyro.__version__.startswith('1.0.0')
parser = argparse.ArgumentParser(description="Demonstrate the use of an LKJ Prior")
parser.add_argument("--num-samples", nargs="?", default=200, type=int)
parser.add_argument("--n", nargs="?", default=500, type=int)
parser.add_argument("--num-chains", nargs='?', default=4, type=int)
parser.add_argument("--num-variables", nargs='?', default=5, type=int)
parser.add_argument("--warmup-steps", nargs='?', default=100, type=int)
parser.add_argument("--rng_seed", nargs='?', default=0, type=int)
parser.add_argument("--cuda", action="store_true", default=False)
args = parser.parse_args()
pyro.set_rng_seed(args.rng_seed)
# Enable validation checks
pyro.enable_validation(__debug__)
    # work around the error "RuntimeError: received 0 items of ancdata"
# see https://discuss.pytorch.org/t/received-0-items-of-ancdata-pytorch-0-4-0/19823
torch.multiprocessing.set_sharing_strategy("file_system")
main(args)
| [
"torch.zeros",
"torch.randn",
"torch.multiprocessing.set_sharing_strategy",
"torch.ones"
] | 1.3.0 | jrmcornish/pyro | 38914d5eb596dc140e226031534ff4ea7903dc35 |
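For reference, the covariance implied by theta and L_omega in model() can be recovered from the Cholesky factor; a stand-alone sketch with placeholder values (not samples from the model):
import torch
d = 5
theta = torch.rand(d) + 0.1                            # placeholder variances
L_omega = torch.eye(d)                                 # placeholder correlation Cholesky factor
L_Omega = torch.mm(torch.diag(theta.sqrt()), L_omega)
Sigma = L_Omega @ L_Omega.t()                          # covariance; its diagonal equals theta
corr = L_omega @ L_omega.t()                           # correlation matrix (identity for this placeholder)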
1.8 | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
from dgl.nn.pytorch import GraphConv, HeteroGraphConv
from openhgnn.models.macro_layer.SemanticConv import SemanticAttention
from ..models.layers import homo_layer_dict
class HeteroGeneralLayer(nn.Module):
'''General wrapper for layers'''
def __init__(self, name, rel_names, dim_in, dim_out, dropout, act=None, has_bn=True,
has_l2norm=False, **kwargs):
super(HeteroGeneralLayer, self).__init__()
self.has_l2norm = has_l2norm
has_bn = has_bn
self.layer = RelationConv(name, rel_names, dim_in, dim_out,
bias=not has_bn, **kwargs)
layer_wrapper = []
if has_bn:
layer_wrapper.append(nn.BatchNorm1d(dim_out))
if dropout > 0:
layer_wrapper.append(nn.Dropout(p=dropout))
if act is not None:
layer_wrapper.append(act)
self.post_layer = nn.Sequential(*layer_wrapper)
def forward(self, g, h_dict):
h_dict = self.layer(g, h_dict)
if self.has_l2norm:
for name, batch_h in h_dict.items():
h_dict[name] = F.normalize(self.post_layer(batch_h), p=2, dim=-1)
return h_dict
class BatchNorm1dNode(nn.Module):
'''General wrapper for layers'''
def __init__(self, dim_in):
super(BatchNorm1dNode, self).__init__()
self.bn = nn.BatchNorm1d(dim_in)
def forward(self, h):
h = self.bn(h)
return h
class RelationConv(nn.Module):
def __init__(self, name, rel_names, dim_in, dim_out, bias=False, **kwargs):
super(RelationConv, self).__init__()
macro_func = kwargs['macro_func']
if macro_func == 'attention':
macro_func = SemanticAttention(dim_out)
self.model = HeteroGraphConv({
rel: homo_layer_dict[name](dim_in, dim_out, bias=bias)
for rel in rel_names
}, aggregate=macro_func)
def forward(self, g, h_dict):
h_dict = self.model(g, h_dict)
return h_dict
| [
"torch.nn.Sequential",
"torch.nn.Dropout",
"torch.nn.BatchNorm1d"
] | 1.8.1 | guyuisland/OpenHGNN | ab25b83431fed760136e122b442ca4470eb9522c |
1.4 | """ Activations
A collection of activations fn and modules with a common interface so that they can
easily be swapped. All have an `inplace` arg even if not used.
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
from torch import nn as nn
from torch.nn import functional as F
def swish(x, inplace: bool = False):
"""Swish - Described in: https://arxiv.org/abs/1710.05941
"""
return x.mul_(x.sigmoid()) if inplace else x.mul(x.sigmoid())
_has_silu = 'silu' in dir(torch.nn.functional)
if _has_silu:
def nswish(x, inplace: bool = False):
return F.silu(x).mul_(1.676531339) if inplace else F.silu(x).mul(1.676531339)
else:
def nswish(x, inplace: bool = False):
"""Normalized Swish
"""
return x.mul_(x.sigmoid()).mul_(1.676531339) if inplace else x.mul(x.sigmoid()).mul(1.676531339)
class Swish(nn.Module):
def __init__(self, inplace: bool = False):
super(Swish, self).__init__()
self.inplace = inplace
def forward(self, x):
return swish(x, self.inplace)
class NSwish(nn.Module):
def __init__(self, inplace: bool = False):
super(NSwish, self).__init__()
self.inplace = inplace
def forward(self, x):
return nswish(x, self.inplace)
def mish(x, inplace: bool = False):
"""Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
NOTE: I don't have a working inplace variant
"""
return x.mul(F.softplus(x).tanh())
class Mish(nn.Module):
"""Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
"""
def __init__(self, inplace: bool = False):
super(Mish, self).__init__()
def forward(self, x):
return mish(x)
def sigmoid(x, inplace: bool = False):
return x.sigmoid_() if inplace else x.sigmoid()
# PyTorch has this, but not with a consistent inplace argument interface
class Sigmoid(nn.Module):
def __init__(self, inplace: bool = False):
super(Sigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return x.sigmoid_() if self.inplace else x.sigmoid()
def tanh(x, inplace: bool = False):
return x.tanh_() if inplace else x.tanh()
# PyTorch has this, but not with a consistent inplace argument interface
class Tanh(nn.Module):
def __init__(self, inplace: bool = False):
super(Tanh, self).__init__()
self.inplace = inplace
def forward(self, x):
return x.tanh_() if self.inplace else x.tanh()
def hard_swish(x, inplace: bool = False):
inner = F.relu6(x + 3.).div_(6.)
return x.mul_(inner) if inplace else x.mul(inner)
class HardSwish(nn.Module):
def __init__(self, inplace: bool = False):
super(HardSwish, self).__init__()
self.inplace = inplace
def forward(self, x):
return hard_swish(x, self.inplace)
def hard_sigmoid(x, inplace: bool = False):
if inplace:
return x.add_(3.).clamp_(0., 6.).div_(6.)
else:
return F.relu6(x + 3.) / 6.
class HardSigmoid(nn.Module):
def __init__(self, inplace: bool = False):
super(HardSigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return hard_sigmoid(x, self.inplace)
def hard_mish(x, inplace: bool = False):
""" Hard Mish
Experimental, based on notes by Mish author Diganta Misra at
https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md
"""
if inplace:
return x.mul_(0.5 * (x + 2).clamp(min=0, max=2))
else:
return 0.5 * x * (x + 2).clamp(min=0, max=2)
class HardMish(nn.Module):
def __init__(self, inplace: bool = False):
super(HardMish, self).__init__()
self.inplace = inplace
def forward(self, x):
return hard_mish(x, self.inplace)
class PReLU(nn.PReLU):
"""Applies PReLU (w/ dummy inplace arg)
"""
def __init__(self, num_parameters: int = 1, init: float = 0.25, inplace: bool = False) -> None:
super(PReLU, self).__init__(num_parameters=num_parameters, init=init)
def forward(self, input: torch.Tensor) -> torch.Tensor:
return F.prelu(input, self.weight)
def gelu(x: torch.Tensor, inplace: bool = False) -> torch.Tensor:
return F.gelu(x)
class GELU(nn.Module):
"""Applies the Gaussian Error Linear Units function (w/ dummy inplace arg)
"""
def __init__(self, inplace: bool = False):
super(GELU, self).__init__()
def forward(self, input: torch.Tensor) -> torch.Tensor:
return F.gelu(input)
| [
"torch.nn.functional.relu6",
"torch.nn.functional.softplus",
"torch.nn.functional.gelu",
"torch.nn.functional.silu",
"torch.nn.functional.prelu"
] | 1.4.0 | scottclowe/pytorch-image-models | 3dbeb84b3e02bf1d5dc289bd9fc0ca5682332956 |
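A quick numerical check of the definitions above (assumes the functions are in scope; input values are arbitrary):
import torch
import torch.nn.functional as F
x = torch.randn(4)
assert torch.allclose(swish(x), x * torch.sigmoid(x))
assert torch.allclose(hard_sigmoid(x), F.relu6(x + 3.0) / 6.0)
if _has_silu:
    assert torch.allclose(nswish(x), F.silu(x) * 1.676531339)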
1.1 | import abc
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
import utils
class ContinualLearner(nn.Module, metaclass=abc.ABCMeta):
'''Abstract module to add continual learning capabilities to a classifier.'''
def __init__(self):
super().__init__()
        #----------------- EWC-specific parameters -----------------#
self.ewc = False
self.ewc_lambda = 5000 #-> hyperparam: how strong to weigh EWC-loss ("regularisation strength")
self.gamma = 1. #-> hyperparam (online EWC): decay-term for old tasks' contribution to quadratic term
self.online = True #-> "online" (=single quadratic term) or "offline" (=quadratic term per task) EWC
self.fisher_n = None #-> sample size for estimating FI-matrix (if "None", full pass over dataset)
self.emp_FI = False #-> if True, use provided labels to calculate FI ("empirical FI"); else predicted labels
self.EWC_task_count = 0 #-> keeps track of number of quadratic loss terms (for "offline EWC")
        #----------------- Distillation-specific parameters -----------------#
self.distill = False
self.KD_temp = 2.0
    #----------------- EWC-specific functions -----------------#
def estimate_fisher(self, dataset, allowed_classes=None, collate_fn=None):
'''After completing training on a task, estimate diagonal of Fisher Information matrix.
[dataset]: <DataSet> to be used to estimate FI-matrix
[allowed_classes]: <list> with class-indeces of 'allowed' or 'active' classes'''
# Prepare <dict> to store estimated Fisher Information matrix
est_fisher_info = {}
for n, p in self.named_parameters():
if p.requires_grad:
n = n.replace('.', '__')
est_fisher_info[n] = p.detach().clone().zero_()
# Set model to evaluation mode
mode = self.training
self.eval()
# Create data-loader to give batches of size 1
data_loader = utils.get_data_loader(dataset, batch_size=1, cuda=self._is_on_cuda(), collate_fn=collate_fn)
# Estimate the FI-matrix for [self.fisher_n] batches of size 1
for index,(x,y) in enumerate(data_loader):
# break from for-loop if max number of samples has been reached
if self.fisher_n is not None:
if index >= self.fisher_n:
break
# run forward pass of model
x = x.to(self._device())
output = self(x) if allowed_classes is None else self(x)[:, allowed_classes]
if self.emp_FI:
# -use provided label to calculate loglikelihood --> "empirical Fisher":
label = torch.LongTensor([y]) if type(y)==int else y
if allowed_classes is not None:
label = [int(np.where(i == allowed_classes)[0][0]) for i in label.numpy()]
label = torch.LongTensor(label)
label = label.to(self._device())
else:
# -use predicted label to calculate loglikelihood:
label = output.max(1)[1]
# calculate negative log-likelihood
negloglikelihood = F.nll_loss(F.log_softmax(output, dim=1), label)
# Calculate gradient of negative loglikelihood
self.zero_grad()
negloglikelihood.backward()
# Square gradients and keep running sum
for n, p in self.named_parameters():
if p.requires_grad:
n = n.replace('.', '__')
if p.grad is not None:
est_fisher_info[n] += p.grad.detach() ** 2
# Normalize by sample size used for estimation
est_fisher_info = {n: p/index for n, p in est_fisher_info.items()}
# Store new values in the network
for n, p in self.named_parameters():
if p.requires_grad:
n = n.replace('.', '__')
# -mode (=MAP parameter estimate)
self.register_buffer('{}_EWC_prev_task{}'.format(n, "" if self.online else self.EWC_task_count+1),
p.detach().clone())
# -accuracy (approximated by diagonal Fisher Information matrix)
if self.online and self.EWC_task_count==1:
existing_values = getattr(self, '{}_EWC_estimated_fisher'.format(n))
est_fisher_info[n] += self.gamma * existing_values
self.register_buffer('{}_EWC_estimated_fisher{}'.format(n, "" if self.online else self.EWC_task_count+1),
est_fisher_info[n])
# If "offline EWC", increase task-count (for "online EWC", set it to 1 to indicate EWC-loss can be calculated)
self.EWC_task_count = 1 if self.online else self.EWC_task_count + 1
# Set model back to its initial mode
self.train(mode=mode)
def ewc_loss(self):
'''Calculate EWC-loss.'''
if self.EWC_task_count>0:
losses = []
# If "offline EWC", loop over all previous tasks (if "online EWC", [EWC_task_count]=1 so only 1 iteration)
for task in range(1, self.EWC_task_count+1):
for n, p in self.named_parameters():
if p.requires_grad:
# Retrieve stored mode (MAP estimate) and accuracy (Fisher Information matrix)
n = n.replace('.', '__')
mean = getattr(self, '{}_EWC_prev_task{}'.format(n, "" if self.online else task))
fisher = getattr(self, '{}_EWC_estimated_fisher{}'.format(n, "" if self.online else task))
# If "online EWC", apply decay-term to the running sum of the Fisher Information matrices
fisher = self.gamma*fisher if self.online else fisher
# Calculate EWC-loss
losses.append((fisher * (p-mean)**2).sum())
# Sum EWC-loss from all parameters (and from all tasks, if "offline EWC")
return (1./2)*sum(losses)
else:
# EWC-loss is 0 if there are no stored mode and accuracy yet
return torch.tensor(0., device=self._device())
def _device(self):
return next(self.parameters()).device
def _is_on_cuda(self):
return next(self.parameters()).is_cuda
@abc.abstractmethod
def forward(self, x):
pass | [
"torch.LongTensor",
"torch.nn.functional.log_softmax"
] | 1.1.0 | hillshadow/continual-learning-for-HAR | 21aeb99efa4bebf8f3f9f00f4452a8fd91e20c75 |
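A training-step sketch showing how the pieces above are typically combined; model is assumed to be a concrete subclass of ContinualLearner, and x, y a labelled batch (these names are illustrative, not part of the repository):
import torch.nn.functional as F
output = model(x)
loss = F.cross_entropy(output, y)
if model.ewc and model.EWC_task_count > 0:
    loss = loss + model.ewc_lambda * model.ewc_loss()
loss.backward()
# ...optimizer step; then, once the task is finished:
# model.estimate_fisher(task_dataset)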
1.4 | from typing import Tuple
import pandas as pd
import torch
from genrl.utils.data_bandits.base import DataBasedBandit
from genrl.utils.data_bandits.utils import download_data, fetch_data_with_header
URL = "https://archive.ics.uci.edu/ml/machine-learning-databases/census1990-mld/USCensus1990.data.txt"
class CensusDataBandit(DataBasedBandit):
"""A contextual bandit based on the Census dataset.
Source:
https://archive.ics.uci.edu/ml/datasets/US+Census+Data+%281990%29
Args:
path (str, optional): Path to the data. Defaults to "./data/Census/".
download (bool, optional): Whether to download the data. Defaults to False.
force_download (bool, optional): Whether to force download even if file exists.
Defaults to False.
url (Union[str, None], optional): URL to download data from. Defaults to None
which implies use of source URL.
device (str): Device to use for tensor operations.
"cpu" for cpu or "cuda" for cuda. Defaults to "cpu".
Attributes:
n_actions (int): Number of actions available.
context_dim (int): The length of context vector.
len (int): The number of examples (context, reward pairs) in the dataset.
device (torch.device): Device to use for tensor operations.
Raises:
FileNotFoundError: If file is not found at specified path.
"""
def __init__(self, **kwargs):
super(CensusDataBandit, self).__init__(kwargs.get("device", "cpu"))
path = kwargs.get("path", "./data/Census/")
download = kwargs.get("download", None)
force_download = kwargs.get("force_download", None)
url = kwargs.get("url", URL)
if download:
fpath = download_data(path, url, force_download)
self._df = pd.read_csv(fpath)
else:
self._df = fetch_data_with_header(path, "USCensus1990.data.txt")
self.n_actions = len(self._df["dOccup"].unique())
self._context_columns = [
i
for i in range(self._df.shape[1])
if i != self._df.columns.get_loc("dOccup")
]
self.context_dim = self._df.shape[1] - 1
self.len = len(self._df)
def reset(self) -> torch.Tensor:
"""Reset bandit by shuffling indices and get new context.
Returns:
torch.Tensor: Current context selected by bandit.
"""
self._reset()
self.df = self._df.sample(frac=1).reset_index(drop=True)
return self._get_context()
def _compute_reward(self, action: int) -> Tuple[int, int]:
"""Compute the reward for a given action.
Args:
action (int): The action to compute reward for.
Returns:
Tuple[int, int]: Computed reward.
"""
label = self._df["dOccup"].iloc[self.idx]
r = int(label == (action + 1))
return r, 1
def _get_context(self) -> torch.Tensor:
"""Get the vector for current selected context.
Returns:
torch.Tensor: Current context vector.
"""
return torch.tensor(
self._df.iloc[self.idx, self._context_columns],
device=self.device,
dtype=torch.float,
)
| [
"torch.tensor"
] | 1.4.0 | matrig/genrl | 25eb018f18a9a1d0865c16e5233a2a7ccddbfd78 |
1.9 | #!/usr/bin/env python
"""
Copyright 2018 Johns Hopkins University (Author: Jesus Villalba)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
import sys
import os
from pathlib import Path
from jsonargparse import (
ArgumentParser,
ActionConfigFile,
ActionParser,
namespace_to_dict,
)
import time
import logging
import multiprocessing
import numpy as np
import torch
import torch.nn as nn
from hyperion.hyp_defs import config_logger, set_float_cpu
from hyperion.torch.utils import open_device
from hyperion.torch.utils import ddp
from hyperion.torch.trainers import XVectorTrainer as Trainer
from hyperion.torch.models import TransformerXVectorV1 as XVec
from hyperion.torch.data import FeatSeqDataset as SD
from hyperion.torch.data import ClassWeightedSeqSampler as Sampler
from hyperion.torch.metrics import CategoricalAccuracy
def init_data(data_rspec, train_list, val_list, num_workers, num_gpus, rank, **kwargs):
sd_args = SD.filter_args(**kwargs)
sampler_args = Sampler.filter_args(**kwargs)
if rank == 0:
logging.info("audio dataset args={}".format(sd_args))
logging.info("sampler args={}".format(sampler_args))
logging.info("init datasets")
train_data = SD(data_rspec, train_list, **sd_args)
val_data = SD(data_rspec, val_list, is_val=True, **sd_args)
if rank == 0:
logging.info("init samplers")
train_sampler = Sampler(train_data, **sampler_args)
val_sampler = Sampler(val_data, **sampler_args)
num_workers_per_gpu = int((num_workers + num_gpus - 1) / num_gpus)
largs = (
{"num_workers": num_workers_per_gpu, "pin_memory": True} if num_gpus > 0 else {}
)
train_loader = torch.utils.data.DataLoader(
train_data, batch_sampler=train_sampler, **largs
)
test_loader = torch.utils.data.DataLoader(
val_data, batch_sampler=val_sampler, **largs
)
return train_loader, test_loader
def init_xvector(num_classes, rank, **kwargs):
xvec_args = XVec.filter_args(**kwargs)
if rank == 0:
logging.info("xvector network args={}".format(xvec_args))
xvec_args["num_classes"] = num_classes
model = XVec(**xvec_args)
if rank == 0:
logging.info("x-vector-model={}".format(model))
return model
def train_xvec(gpu_id, args):
config_logger(args.verbose)
del args.verbose
logging.debug(args)
kwargs = namespace_to_dict(args)
torch.manual_seed(args.seed)
set_float_cpu("float32")
ddp_args = ddp.filter_ddp_args(**kwargs)
device, rank, world_size = ddp.ddp_init(gpu_id, **ddp_args)
kwargs["rank"] = rank
train_loader, test_loader = init_data(**kwargs)
model = init_xvector(train_loader.dataset.num_classes, **kwargs)
trn_args = Trainer.filter_args(**kwargs)
if rank == 0:
logging.info("trainer args={}".format(trn_args))
metrics = {"acc": CategoricalAccuracy()}
trainer = Trainer(
model, device=device, metrics=metrics, ddp=world_size > 1, **trn_args
)
if args.resume:
trainer.load_last_checkpoint()
trainer.fit(train_loader, test_loader)
ddp.ddp_cleanup()
if __name__ == "__main__":
parser = ArgumentParser(description="Train XVector with ResNet encoder")
parser.add_argument("--cfg", action=ActionConfigFile)
parser.add_argument("--data-rspec", required=True)
parser.add_argument("--train-list", required=True)
parser.add_argument("--val-list", required=True)
SD.add_argparse_args(parser)
Sampler.add_class_args(parser)
parser.add_argument(
"--num-workers", type=int, default=5, help="num_workers of data loader"
)
XVec.add_class_args(parser)
Trainer.add_class_args(parser)
ddp.add_ddp_args(parser)
parser.add_argument("--seed", type=int, default=1123581321, help="random seed")
parser.add_argument(
"--resume",
action="store_true",
default=False,
help="resume training from checkpoint",
)
parser.add_argument(
"-v", "--verbose", dest="verbose", default=1, choices=[0, 1, 2, 3], type=int
)
args = parser.parse_args()
try:
gpu_id = int(os.environ["LOCAL_RANK"])
except:
gpu_id = 0
if gpu_id == 0:
try:
config_file = Path(args.exp_path) / "config.yaml"
parser.save(args, str(config_file), format="yaml", overwrite=True)
except:
pass
# torch docs recommend using forkserver
multiprocessing.set_start_method("forkserver")
train_xvec(gpu_id, args)
| [
"torch.manual_seed",
"torch.utils.data.DataLoader"
] | 1.9.0 | hyperion-ml/hyperion | c4c9eee0acab1ba572843373245da12d00dfffaa |
1.0 | from pathlib import Path
import hydra
import torch
from hydra.utils import to_absolute_path
from omegaconf import DictConfig
from torch import nn
from ttslearn.train_util import (
collate_fn_dnntts,
get_epochs_with_optional_tqdm,
save_checkpoint,
setup,
)
from ttslearn.util import make_non_pad_mask
def train_step(model, optimizer, train, in_feats, out_feats, lengths):
optimizer.zero_grad()
    # Forward pass
pred_out_feats = model(in_feats, lengths)
    # Create a mask so that zero-padded regions are excluded from the loss computation
mask = make_non_pad_mask(lengths).unsqueeze(-1).to(in_feats.device)
pred_out_feats = pred_out_feats.masked_select(mask)
out_feats = out_feats.masked_select(mask)
    # Compute the loss
loss = nn.MSELoss()(pred_out_feats, out_feats)
    # Backpropagation and model parameter update
if train:
loss.backward()
optimizer.step()
return loss
def train_loop(
config, logger, device, model, optimizer, lr_scheduler, data_loaders, writer
):
out_dir = Path(to_absolute_path(config.train.out_dir))
best_loss = torch.finfo(torch.float32).max
for epoch in get_epochs_with_optional_tqdm(config.tqdm, config.train.nepochs):
for phase in data_loaders.keys():
train = phase.startswith("train")
model.train() if train else model.eval()
running_loss = 0
for in_feats, out_feats, lengths in data_loaders[phase]:
                # NOTE: sort by sequence length in descending order to match the PyTorch PackedSequence convention
lengths, indices = torch.sort(lengths, dim=0, descending=True)
in_feats, out_feats = (
in_feats[indices].to(device),
out_feats[indices].to(device),
)
loss = train_step(model, optimizer, train, in_feats, out_feats, lengths)
running_loss += loss.item()
ave_loss = running_loss / len(data_loaders[phase])
writer.add_scalar(f"Loss/{phase}", ave_loss, epoch)
if not train and ave_loss < best_loss:
best_loss = ave_loss
save_checkpoint(logger, out_dir, model, optimizer, epoch, is_best=True)
lr_scheduler.step()
if epoch % config.train.checkpoint_epoch_interval == 0:
save_checkpoint(logger, out_dir, model, optimizer, epoch, is_best=False)
save_checkpoint(logger, out_dir, model, optimizer, config.train.nepochs)
@hydra.main(config_path="conf/train_dnntts", config_name="config")
def my_app(config: DictConfig) -> None:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model, optimizer, lr_scheduler, data_loaders, writer, logger = setup(
config, device, collate_fn_dnntts
)
train_loop(
config, logger, device, model, optimizer, lr_scheduler, data_loaders, writer
)
if __name__ == "__main__":
my_app()
| [
"torch.finfo",
"torch.sort",
"torch.cuda.is_available",
"torch.nn.MSELoss"
] | 1.0.0 | kunosato-mado/ttslearn | 1230ce8d5256a7438c485a337968ce086620a88e |
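The masking trick in train_step above can be reproduced in isolation with a local stand-in for make_non_pad_mask (the real helper lives in ttslearn.util); a minimal sketch:
import torch
from torch import nn
lengths = torch.tensor([3, 2])
out_feats = torch.randn(2, 4, 5)    # (batch, time, feat_dim), zero-padded to length 4
pred_feats = torch.randn(2, 4, 5)
mask = (torch.arange(4)[None, :] < lengths[:, None]).unsqueeze(-1)   # (2, 4, 1) boolean mask
loss = nn.MSELoss()(pred_feats.masked_select(mask), out_feats.masked_select(mask))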
1.7 | import argparse
import os
import pickle as pkl
import numpy as np
import torch
from statsmodels.tsa.arima_process import ArmaProcess
from attribution.mask_group import MaskGroup
from attribution.perturbation import GaussianBlur
from baselines.explainers import FO, FP, IG, SVS
from utils.losses import mse
explainers = ["dynamask", "fo", "fp", "ig", "shap"]
def run_experiment(
cv: int = 0,
N_ex: int = 10,
T: int = 50,
N_features: int = 50,
N_select: int = 5,
save_dir: str = "experiments/results/rare_time/",
):
"""Run experiment.
Args:
cv: Do the experiment with different cv to obtain error bars.
N_ex: Number of time series to generate.
T: Length of each time series.
N_features: Number of features in each time series.
N_select: Number of time steps that are truly salient.
save_dir: Directory where the results should be saved.
    Returns:
None
"""
# Create the saving directory if it does not exist
if not os.path.exists(save_dir):
os.makedirs(save_dir)
# Initialize useful variables
random_seed = cv
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.manual_seed(random_seed)
np.random.seed(random_seed)
pert = GaussianBlur(device=device) # We use a Gaussian Blur perturbation operator
# Generate the input data
ar = np.array([2, 0.5, 0.2, 0.1]) # AR coefficients
ma = np.array([2]) # MA coefficients
data_arima = ArmaProcess(ar=ar, ma=ma).generate_sample(nsample=(N_ex, T, N_features), axis=1)
X = torch.tensor(data_arima, device=device, dtype=torch.float32)
# Initialize the saliency tensors
true_saliency = torch.zeros(size=(N_ex, T, N_features), device=device, dtype=torch.int64)
dynamask_saliency = torch.zeros(size=true_saliency.shape, device=device)
fo_saliency = torch.zeros(size=true_saliency.shape, device=device)
fp_saliency = torch.zeros(size=true_saliency.shape, device=device)
ig_saliency = torch.zeros(size=true_saliency.shape, device=device)
shap_saliency = torch.zeros(size=true_saliency.shape, device=device)
for k in range(N_ex): # We compute the attribution for each individual time series
print(f"Now working on example {k + 1}/{N_ex}.")
# The truly salient times are selected randomly
t_rand = np.random.randint(low=0, high=T - N_select)
true_saliency[k, t_rand : t_rand + N_select, int(N_features / 4) : int(3 * N_features / 4)] = 1
x = X[k, :, :]
# The white box only depends on the truly salient features
def f(input):
output = torch.zeros(input.shape, device=input.device)
output[t_rand : t_rand + N_select, int(N_features / 4) : int(3 * N_features / 4)] = input[
t_rand : t_rand + N_select, int(N_features / 4) : int(3 * N_features / 4)
]
output = (output ** 2).sum(dim=-1)
return output
# Dynamask attribution
mask_group = MaskGroup(perturbation=pert, device=device, random_seed=random_seed, verbose=False)
mask_group.fit(
f=f,
X=x,
area_list=np.arange(0.001, 0.051, 0.001),
loss_function=mse,
n_epoch=1000,
size_reg_factor_dilation=1000,
size_reg_factor_init=1,
learning_rate=1,
)
mask = mask_group.get_best_mask()
dynamask_attr = mask.mask_tensor.clone().detach()
dynamask_saliency[k, :, :] = dynamask_attr
# Feature Occlusion attribution
fo = FO(f=f)
fo_attr = fo.attribute(x)
fo_saliency[k, :, :] = fo_attr
# Feature Permutation attribution
fp = FP(f=f)
fp_attr = fp.attribute(x)
fp_saliency[k, :, :] = fp_attr
# Integrated Gradient attribution
ig = IG(f=f)
ig_attr = ig.attribute(x)
ig_saliency[k, :, :] = ig_attr
# Sampling Shapley Value attribution
shap = SVS(f=f)
shap_attr = shap.attribute(x)
shap_saliency[k, :, :] = shap_attr
# Save everything in the directory
with open(os.path.join(save_dir, f"true_saliency_{cv}.pkl"), "wb") as file:
pkl.dump(true_saliency, file)
with open(os.path.join(save_dir, f"dynamask_saliency_{cv}.pkl"), "wb") as file:
pkl.dump(dynamask_saliency, file)
with open(os.path.join(save_dir, f"fo_saliency_{cv}.pkl"), "wb") as file:
pkl.dump(fo_saliency, file)
with open(os.path.join(save_dir, f"fp_saliency_{cv}.pkl"), "wb") as file:
pkl.dump(fp_saliency, file)
with open(os.path.join(save_dir, f"ig_saliency_{cv}.pkl"), "wb") as file:
pkl.dump(ig_saliency, file)
with open(os.path.join(save_dir, f"shap_saliency_{cv}.pkl"), "wb") as file:
pkl.dump(shap_saliency, file)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--cv", default=0, type=int)
args = parser.parse_args()
run_experiment(cv=args.cv)
| [
"torch.zeros",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.tensor"
] | 1.7.1 | vanderschaarlab/Dynamask | b3b190b2ee79c4ecf6c1302b6a3efe6250f094b8 |
0.4 | """
Assorted utilities for working with neural networks in AllenNLP.
"""
# pylint: disable=too-many-lines
from collections import defaultdict
from typing import Any, Dict, List, Optional, Sequence, Tuple, TypeVar
import logging
import copy
import math
import torch
from allennlp.common.checks import ConfigurationError
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
T = TypeVar('T')
def has_tensor(obj) -> bool:
"""
Given a possibly complex data structure,
check if it has any torch.Tensors in it.
"""
if isinstance(obj, torch.Tensor):
return True
elif isinstance(obj, dict):
return any(has_tensor(value) for value in obj.values())
elif isinstance(obj, (list, tuple)):
return any(has_tensor(item) for item in obj)
else:
return False
def move_to_device(obj, cuda_device: int):
"""
Given a structure (possibly) containing Tensors on the CPU,
move all the Tensors to the specified GPU (or do nothing, if they should be on the CPU).
"""
if cuda_device < 0 or not has_tensor(obj):
return obj
elif isinstance(obj, torch.Tensor):
return obj.cuda(cuda_device)
elif isinstance(obj, dict):
return {key: move_to_device(value, cuda_device) for key, value in obj.items()}
elif isinstance(obj, list):
return [move_to_device(item, cuda_device) for item in obj]
elif isinstance(obj, tuple):
return tuple([move_to_device(item, cuda_device) for item in obj])
else:
return obj
def clamp_tensor(tensor, minimum, maximum):
"""
Supports sparse and dense tensors.
Returns a tensor with values clamped between the provided minimum and maximum,
without modifying the original tensor.
"""
if tensor.is_sparse:
coalesced_tensor = tensor.coalesce()
# pylint: disable=protected-access
coalesced_tensor._values().clamp_(minimum, maximum)
return coalesced_tensor
else:
return tensor.clamp(minimum, maximum)
def batch_tensor_dicts(tensor_dicts: List[Dict[str, torch.Tensor]],
remove_trailing_dimension: bool = False) -> Dict[str, torch.Tensor]:
"""
Takes a list of tensor dictionaries, where each dictionary is assumed to have matching keys,
and returns a single dictionary with all tensors with the same key batched together.
Parameters
----------
tensor_dicts : ``List[Dict[str, torch.Tensor]]``
The list of tensor dictionaries to batch.
remove_trailing_dimension : ``bool``
If ``True``, we will check for a trailing dimension of size 1 on the tensors that are being
batched, and remove it if we find it.
"""
key_to_tensors: Dict[str, List[torch.Tensor]] = defaultdict(list)
for tensor_dict in tensor_dicts:
for key, tensor in tensor_dict.items():
key_to_tensors[key].append(tensor)
batched_tensors = {}
for key, tensor_list in key_to_tensors.items():
batched_tensor = torch.stack(tensor_list)
if remove_trailing_dimension and all(tensor.size(-1) == 1 for tensor in tensor_list):
batched_tensor = batched_tensor.squeeze(-1)
batched_tensors[key] = batched_tensor
return batched_tensors
def get_lengths_from_binary_sequence_mask(mask: torch.Tensor):
"""
Compute sequence lengths for each batch element in a tensor using a
binary mask.
Parameters
----------
mask : torch.Tensor, required.
A 2D binary mask of shape (batch_size, sequence_length) to
calculate the per-batch sequence lengths from.
Returns
-------
A torch.LongTensor of shape (batch_size,) representing the lengths
of the sequences in the batch.
"""
return mask.long().sum(-1)
def get_mask_from_sequence_lengths(sequence_lengths: torch.Tensor, max_length: int) -> torch.Tensor:
"""
Given a variable of shape ``(batch_size,)`` that represents the sequence lengths of each batch
element, this function returns a ``(batch_size, max_length)`` mask variable. For example, if
our input was ``[2, 2, 3]``, with a ``max_length`` of 4, we'd return
``[[1, 1, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0]]``.
We require ``max_length`` here instead of just computing it from the input ``sequence_lengths``
because it lets us avoid finding the max, then copying that value from the GPU to the CPU so
that we can use it to construct a new tensor.
"""
# (batch_size, max_length)
ones = sequence_lengths.new_ones(sequence_lengths.size(0), max_length)
range_tensor = ones.cumsum(dim=1)
return (sequence_lengths.unsqueeze(1) >= range_tensor).long()
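def _example_mask_from_lengths():
    # Illustrative helper added for this write-up (not in the original AllenNLP module):
    # reproduces the docstring example of get_mask_from_sequence_lengths above.
    lengths = torch.tensor([2, 2, 3])
    mask = get_mask_from_sequence_lengths(lengths, max_length=4)
    assert mask.tolist() == [[1, 1, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0]]
    return mask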
def sort_batch_by_length(tensor: torch.Tensor, sequence_lengths: torch.Tensor):
"""
Sort a batch first tensor by some specified lengths.
Parameters
----------
tensor : torch.FloatTensor, required.
A batch first Pytorch tensor.
sequence_lengths : torch.LongTensor, required.
A tensor representing the lengths of some dimension of the tensor which
we want to sort by.
Returns
-------
sorted_tensor : torch.FloatTensor
The original tensor sorted along the batch dimension with respect to sequence_lengths.
sorted_sequence_lengths : torch.LongTensor
The original sequence_lengths sorted by decreasing size.
restoration_indices : torch.LongTensor
Indices into the sorted_tensor such that
``sorted_tensor.index_select(0, restoration_indices) == original_tensor``
    permutation_index : torch.LongTensor
The indices used to sort the tensor. This is useful if you want to sort many
tensors using the same ordering.
"""
if not isinstance(tensor, torch.Tensor) or not isinstance(sequence_lengths, torch.Tensor):
raise ConfigurationError("Both the tensor and sequence lengths must be torch.Tensors.")
sorted_sequence_lengths, permutation_index = sequence_lengths.sort(0, descending=True)
sorted_tensor = tensor.index_select(0, permutation_index)
index_range = torch.arange(0, len(sequence_lengths), device=sequence_lengths.device)
# This is the equivalent of zipping with index, sorting by the original
# sequence lengths and returning the now sorted indices.
_, reverse_mapping = permutation_index.sort(0, descending=False)
restoration_indices = index_range.index_select(0, reverse_mapping)
return sorted_tensor, sorted_sequence_lengths, restoration_indices, permutation_index
def get_final_encoder_states(encoder_outputs: torch.Tensor,
mask: torch.Tensor,
bidirectional: bool = False) -> torch.Tensor:
"""
Given the output from a ``Seq2SeqEncoder``, with shape ``(batch_size, sequence_length,
encoding_dim)``, this method returns the final hidden state for each element of the batch,
giving a tensor of shape ``(batch_size, encoding_dim)``. This is not as simple as
``encoder_outputs[:, -1]``, because the sequences could have different lengths. We use the
mask (which has shape ``(batch_size, sequence_length)``) to find the final state for each batch
instance.
Additionally, if ``bidirectional`` is ``True``, we will split the final dimension of the
``encoder_outputs`` into two and assume that the first half is for the forward direction of the
encoder and the second half is for the backward direction. We will concatenate the last state
for each encoder dimension, giving ``encoder_outputs[:, -1, :encoding_dim/2]`` concatenated with
``encoder_outputs[:, 0, encoding_dim/2:]``.
"""
# These are the indices of the last words in the sequences (i.e. length sans padding - 1). We
# are assuming sequences are right padded.
# Shape: (batch_size,)
last_word_indices = mask.sum(1).long() - 1
batch_size, _, encoder_output_dim = encoder_outputs.size()
expanded_indices = last_word_indices.view(-1, 1, 1).expand(batch_size, 1, encoder_output_dim)
# Shape: (batch_size, 1, encoder_output_dim)
final_encoder_output = encoder_outputs.gather(1, expanded_indices)
final_encoder_output = final_encoder_output.squeeze(1) # (batch_size, encoder_output_dim)
if bidirectional:
final_forward_output = final_encoder_output[:, :(encoder_output_dim // 2)]
final_backward_output = encoder_outputs[:, 0, (encoder_output_dim // 2):]
final_encoder_output = torch.cat([final_forward_output, final_backward_output], dim=-1)
return final_encoder_output
def get_dropout_mask(dropout_probability: float, tensor_for_masking: torch.Tensor):
"""
Computes and returns an element-wise dropout mask for a given tensor, where
each element in the mask is dropped out with probability dropout_probability.
Note that the mask is NOT applied to the tensor - the tensor is passed to retain
the correct CUDA tensor type for the mask.
Parameters
----------
dropout_probability : float, required.
Probability of dropping a dimension of the input.
tensor_for_masking : torch.Tensor, required.
Returns
-------
A torch.FloatTensor consisting of the binary mask scaled by 1/ (1 - dropout_probability).
This scaling ensures expected values and variances of the output of applying this mask
and the original tensor are the same.
"""
binary_mask = (torch.rand(tensor_for_masking.size()) > dropout_probability).to(tensor_for_masking.device)
# Scale mask by 1/keep_prob to preserve output statistics.
dropout_mask = binary_mask.float().div(1.0 - dropout_probability)
return dropout_mask
def masked_softmax(vector: torch.Tensor,
mask: torch.Tensor,
dim: int = -1,
memory_efficient: bool = False,
mask_fill_value: float = -1e32) -> torch.Tensor:
"""
``torch.nn.functional.softmax(vector)`` does not work if some elements of ``vector`` should be
masked. This performs a softmax on just the non-masked portions of ``vector``. Passing
``None`` in for the mask is also acceptable; you'll just get a regular softmax.
``vector`` can have an arbitrary number of dimensions; the only requirement is that ``mask`` is
broadcastable to ``vector's`` shape. If ``mask`` has fewer dimensions than ``vector``, we will
unsqueeze on dimension 1 until they match. If you need a different unsqueezing of your mask,
do it yourself before passing the mask into this function.
If ``memory_efficient`` is set to true, we will simply use a very large negative number for those
masked positions so that the probabilities of those positions would be approximately 0.
This is not accurate in math, but works for most cases and consumes less memory.
In the case that the input vector is completely masked and ``memory_efficient`` is false, this function
returns an array of ``0.0``. This behavior may cause ``NaN`` if this is used as the last layer of
a model that uses categorical cross-entropy loss. Instead, if ``memory_efficient`` is true, this function
will treat every element as equal, and do softmax over equal numbers.
"""
if mask is None:
result = torch.nn.functional.softmax(vector, dim=dim)
else:
mask = mask.float()
while mask.dim() < vector.dim():
mask = mask.unsqueeze(1)
if not memory_efficient:
# To limit numerical errors from large vector elements outside the mask, we zero these out.
result = torch.nn.functional.softmax(vector * mask, dim=dim)
result = result * mask
result = result / (result.sum(dim=dim, keepdim=True) + 1e-13)
else:
masked_vector = vector.masked_fill((1 - mask).byte(), mask_fill_value)
result = torch.nn.functional.softmax(masked_vector, dim=dim)
return result
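def _example_masked_softmax():
    # Illustrative helper added for this write-up (not in the original AllenNLP module):
    # masked positions receive exactly zero probability and the rest renormalise to one.
    vector = torch.tensor([[1.0, 2.0, 3.0]])
    mask = torch.tensor([[1, 1, 0]])
    probs = masked_softmax(vector, mask)
    assert probs[0, 2].item() == 0.0
    assert abs(probs.sum().item() - 1.0) < 1e-6
    return probs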
def masked_log_softmax(vector: torch.Tensor, mask: torch.Tensor, dim: int = -1) -> torch.Tensor:
"""
``torch.nn.functional.log_softmax(vector)`` does not work if some elements of ``vector`` should be
masked. This performs a log_softmax on just the non-masked portions of ``vector``. Passing
``None`` in for the mask is also acceptable; you'll just get a regular log_softmax.
``vector`` can have an arbitrary number of dimensions; the only requirement is that ``mask`` is
broadcastable to ``vector's`` shape. If ``mask`` has fewer dimensions than ``vector``, we will
unsqueeze on dimension 1 until they match. If you need a different unsqueezing of your mask,
do it yourself before passing the mask into this function.
In the case that the input vector is completely masked, the return value of this function is
arbitrary, but not ``nan``. You should be masking the result of whatever computation comes out
of this in that case, anyway, so the specific values returned shouldn't matter. Also, the way
that we deal with this case relies on having single-precision floats; mixing half-precision
floats with fully-masked vectors will likely give you ``nans``.
If your logits are all extremely negative (i.e., the max value in your logit vector is -50 or
lower), the way we handle masking here could mess you up. But if you've got logit values that
extreme, you've got bigger problems than this.
"""
if mask is not None:
mask = mask.float()
while mask.dim() < vector.dim():
mask = mask.unsqueeze(1)
# vector + mask.log() is an easy way to zero out masked elements in logspace, but it
# results in nans when the whole vector is masked. We need a very small value instead of a
# zero in the mask for these cases. log(1 + 1e-45) is still basically 0, so we can safely
# just add 1e-45 before calling mask.log(). We use 1e-45 because 1e-46 is so small it
# becomes 0 - this is just the smallest value we can actually use.
vector = vector + (mask + 1e-45).log()
return torch.nn.functional.log_softmax(vector, dim=dim)
def masked_max(vector: torch.Tensor,
mask: torch.Tensor,
dim: int,
keepdim: bool = False,
min_val: float = -1e7) -> torch.Tensor:
"""
To calculate max along certain dimensions on masked values
Parameters
----------
vector : ``torch.Tensor``
The vector to calculate max, assume unmasked parts are already zeros
mask : ``torch.Tensor``
The mask of the vector. It must be broadcastable with vector.
dim : ``int``
The dimension to calculate max
keepdim : ``bool``
Whether to keep dimension
min_val : ``float``
The minimal value for paddings
Returns
-------
A ``torch.Tensor`` of including the maximum values.
"""
one_minus_mask = (1.0 - mask).byte()
replaced_vector = vector.masked_fill(one_minus_mask, min_val)
max_value, _ = replaced_vector.max(dim=dim, keepdim=keepdim)
return max_value
def masked_mean(vector: torch.Tensor,
mask: torch.Tensor,
dim: int,
keepdim: bool = False,
eps: float = 1e-8) -> torch.Tensor:
"""
To calculate mean along certain dimensions on masked values
Parameters
----------
vector : ``torch.Tensor``
The vector to calculate mean.
mask : ``torch.Tensor``
The mask of the vector. It must be broadcastable with vector.
dim : ``int``
The dimension to calculate mean
keepdim : ``bool``
Whether to keep dimension
eps : ``float``
A small value to avoid zero division problem.
Returns
-------
A ``torch.Tensor`` of including the mean values.
"""
one_minus_mask = (1.0 - mask).byte()
replaced_vector = vector.masked_fill(one_minus_mask, 0.0)
value_sum = torch.sum(replaced_vector, dim=dim, keepdim=keepdim)
value_count = torch.sum(mask.float(), dim=dim, keepdim=keepdim)
return value_sum / value_count.clamp(min=eps)
def masked_flip(padded_sequence: torch.Tensor,
sequence_lengths: List[int]) -> torch.Tensor:
"""
Flips a padded tensor along the time dimension without affecting masked entries.
Parameters
----------
padded_sequence : ``torch.Tensor``
The tensor to flip along the time dimension.
Assumed to be of dimensions (batch size, num timesteps, ...)
sequence_lengths : ``torch.Tensor``
A list containing the lengths of each unpadded sequence in the batch.
Returns
-------
A ``torch.Tensor`` of the same shape as padded_sequence.
"""
assert padded_sequence.size(0) == len(sequence_lengths), \
        f'sequence_lengths length {len(sequence_lengths)} does not match batch size {padded_sequence.size(0)}'
num_timesteps = padded_sequence.size(1)
flipped_padded_sequence = torch.flip(padded_sequence, [1])
sequences = [flipped_padded_sequence[i, num_timesteps - length:] for i, length in enumerate(sequence_lengths)]
return torch.nn.utils.rnn.pad_sequence(sequences, batch_first=True)
def viterbi_decode(tag_sequence: torch.Tensor,
transition_matrix: torch.Tensor,
tag_observations: Optional[List[int]] = None):
"""
Perform Viterbi decoding in log space over a sequence given a transition matrix
specifying pairwise (transition) potentials between tags and a matrix of shape
(sequence_length, num_tags) specifying unary potentials for possible tags per
timestep.
Parameters
----------
tag_sequence : torch.Tensor, required.
A tensor of shape (sequence_length, num_tags) representing scores for
a set of tags over a given sequence.
transition_matrix : torch.Tensor, required.
A tensor of shape (num_tags, num_tags) representing the binary potentials
for transitioning between a given pair of tags.
tag_observations : Optional[List[int]], optional, (default = None)
A list of length ``sequence_length`` containing the class ids of observed
elements in the sequence, with unobserved elements being set to -1. Note that
it is possible to provide evidence which results in degenerate labelings if
the sequences of tags you provide as evidence cannot transition between each
other, or those transitions are extremely unlikely. In this situation we log a
warning, but the responsibility for providing self-consistent evidence ultimately
lies with the user.
Returns
-------
viterbi_path : List[int]
The tag indices of the maximum likelihood tag sequence.
viterbi_score : torch.Tensor
The score of the viterbi path.
"""
sequence_length, num_tags = list(tag_sequence.size())
if tag_observations:
if len(tag_observations) != sequence_length:
raise ConfigurationError("Observations were provided, but they were not the same length "
"as the sequence. Found sequence of length: {} and evidence: {}"
.format(sequence_length, tag_observations))
else:
tag_observations = [-1 for _ in range(sequence_length)]
path_scores = []
path_indices = []
if tag_observations[0] != -1:
one_hot = torch.zeros(num_tags)
one_hot[tag_observations[0]] = 100000.
path_scores.append(one_hot)
else:
path_scores.append(tag_sequence[0, :])
# Evaluate the scores for all possible paths.
for timestep in range(1, sequence_length):
# Add pairwise potentials to current scores.
summed_potentials = path_scores[timestep - 1].unsqueeze(-1) + transition_matrix
scores, paths = torch.max(summed_potentials, 0)
# If we have an observation for this timestep, use it
# instead of the distribution over tags.
observation = tag_observations[timestep]
# Warn the user if they have passed
# invalid/extremely unlikely evidence.
if tag_observations[timestep - 1] != -1:
if transition_matrix[tag_observations[timestep - 1], observation] < -10000:
logger.warning("The pairwise potential between tags you have passed as "
"observations is extremely unlikely. Double check your evidence "
"or transition potentials!")
if observation != -1:
one_hot = torch.zeros(num_tags)
one_hot[observation] = 100000.
path_scores.append(one_hot)
else:
path_scores.append(tag_sequence[timestep, :] + scores.squeeze())
path_indices.append(paths.squeeze())
# Construct the most likely sequence backwards.
viterbi_score, best_path = torch.max(path_scores[-1], 0)
viterbi_path = [int(best_path.numpy())]
for backward_timestep in reversed(path_indices):
viterbi_path.append(int(backward_timestep[viterbi_path[-1]]))
# Reverse the backward path.
viterbi_path.reverse()
return viterbi_path, viterbi_score
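# Example usage (illustrative sketch, assumed values): with uniform (zero) transition
# potentials the decoder simply follows the per-timestep argmax.
#   tag_sequence = torch.tensor([[0.0, 3.0], [0.0, 3.0], [3.0, 0.0]])
#   transition_matrix = torch.zeros(2, 2)
#   viterbi_decode(tag_sequence, transition_matrix)  # -> ([1, 1, 0], tensor(9.))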
def get_text_field_mask(text_field_tensors: Dict[str, torch.Tensor],
num_wrapping_dims: int = 0) -> torch.LongTensor:
"""
Takes the dictionary of tensors produced by a ``TextField`` and returns a mask
with 0 where the tokens are padding, and 1 otherwise. We also handle ``TextFields``
wrapped by an arbitrary number of ``ListFields``, where the number of wrapping ``ListFields``
is given by ``num_wrapping_dims``.
If ``num_wrapping_dims == 0``, the returned mask has shape ``(batch_size, num_tokens)``.
If ``num_wrapping_dims > 0`` then the returned mask has ``num_wrapping_dims`` extra
dimensions, so the shape will be ``(batch_size, ..., num_tokens)``.
There could be several entries in the tensor dictionary with different shapes (e.g., one for
word ids, one for character ids). In order to get a token mask, we use the tensor in
the dictionary with the lowest number of dimensions. After subtracting ``num_wrapping_dims``,
if this tensor has two dimensions we assume it has shape ``(batch_size, ..., num_tokens)``,
and use it for the mask. If instead it has three dimensions, we assume it has shape
``(batch_size, ..., num_tokens, num_features)``, and sum over the last dimension to produce
the mask. Most frequently this will be a character id tensor, but it could also be a
featurized representation of each token, etc.
If the input ``text_field_tensors`` contains the "mask" key, this is returned instead of inferring the mask.
TODO(joelgrus): can we change this?
NOTE: Our functions for generating masks create torch.LongTensors, because using
torch.ByteTensors makes it easy to run into overflow errors
when doing mask manipulation, such as summing to get the lengths of sequences - see below.
>>> mask = torch.ones([260]).byte()
>>> mask.sum() # equals 260.
    >>> var_mask = torch.autograd.Variable(mask)
>>> var_mask.sum() # equals 4, due to 8 bit precision - the sum overflows.
"""
if "mask" in text_field_tensors:
return text_field_tensors["mask"]
tensor_dims = [(tensor.dim(), tensor) for tensor in text_field_tensors.values()]
tensor_dims.sort(key=lambda x: x[0])
smallest_dim = tensor_dims[0][0] - num_wrapping_dims
if smallest_dim == 2:
token_tensor = tensor_dims[0][1]
return (token_tensor != 0).long()
elif smallest_dim == 3:
character_tensor = tensor_dims[0][1]
return ((character_tensor > 0).long().sum(dim=-1) > 0).long()
else:
raise ValueError("Expected a tensor with dimension 2 or 3, found {}".format(smallest_dim))
def weighted_sum(matrix: torch.Tensor, attention: torch.Tensor) -> torch.Tensor:
"""
Takes a matrix of vectors and a set of weights over the rows in the matrix (which we call an
"attention" vector), and returns a weighted sum of the rows in the matrix. This is the typical
computation performed after an attention mechanism.
Note that while we call this a "matrix" of vectors and an attention "vector", we also handle
higher-order tensors. We always sum over the second-to-last dimension of the "matrix", and we
assume that all dimensions in the "matrix" prior to the last dimension are matched in the
"vector". Non-matched dimensions in the "vector" must be `directly after the batch dimension`.
For example, say I have a "matrix" with dimensions ``(batch_size, num_queries, num_words,
embedding_dim)``. The attention "vector" then must have at least those dimensions, and could
have more. Both:
- ``(batch_size, num_queries, num_words)`` (distribution over words for each query)
- ``(batch_size, num_documents, num_queries, num_words)`` (distribution over words in a
query for each document)
are valid input "vectors", producing tensors of shape:
``(batch_size, num_queries, embedding_dim)`` and
``(batch_size, num_documents, num_queries, embedding_dim)`` respectively.
"""
# We'll special-case a few settings here, where there are efficient (but poorly-named)
# operations in pytorch that already do the computation we need.
if attention.dim() == 2 and matrix.dim() == 3:
return attention.unsqueeze(1).bmm(matrix).squeeze(1)
if attention.dim() == 3 and matrix.dim() == 3:
return attention.bmm(matrix)
if matrix.dim() - 1 < attention.dim():
expanded_size = list(matrix.size())
for i in range(attention.dim() - matrix.dim() + 1):
matrix = matrix.unsqueeze(1)
expanded_size.insert(i + 1, attention.size(i + 1))
matrix = matrix.expand(*expanded_size)
intermediate = attention.unsqueeze(-1).expand_as(matrix) * matrix
return intermediate.sum(dim=-2)
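# Example usage (illustrative sketch, assumed values): an attention distribution over two rows.
#   matrix = torch.tensor([[[1.0, 0.0], [0.0, 1.0]]])   # (batch_size, num_rows, embedding_dim)
#   attention = torch.tensor([[0.25, 0.75]])            # (batch_size, num_rows)
#   weighted_sum(matrix, attention)  # -> tensor([[0.25, 0.75]])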
def sequence_cross_entropy_with_logits(logits: torch.FloatTensor,
targets: torch.LongTensor,
weights: torch.FloatTensor,
average: str = "batch",
label_smoothing: float = None) -> torch.FloatTensor:
"""
Computes the cross entropy loss of a sequence, weighted with respect to
some user provided weights. Note that the weighting here is not the same as
in the :func:`torch.nn.CrossEntropyLoss()` criterion, which is weighting
classes; here we are weighting the loss contribution from particular elements
in the sequence. This allows loss computations for models which use padding.
Parameters
----------
logits : ``torch.FloatTensor``, required.
A ``torch.FloatTensor`` of size (batch_size, sequence_length, num_classes)
which contains the unnormalized probability for each class.
targets : ``torch.LongTensor``, required.
A ``torch.LongTensor`` of size (batch, sequence_length) which contains the
index of the true class for each corresponding step.
weights : ``torch.FloatTensor``, required.
A ``torch.FloatTensor`` of size (batch, sequence_length)
average: str, optional (default = "batch")
If "batch", average the loss across the batches. If "token", average
the loss across each item in the input. If ``None``, return a vector
of losses per batch element.
label_smoothing : ``float``, optional (default = None)
Whether or not to apply label smoothing to the cross-entropy loss.
For example, with a label smoothing value of 0.2, a 4 class classification
target would look like ``[0.05, 0.05, 0.85, 0.05]`` if the 3rd class was
the correct label.
Returns
-------
A torch.FloatTensor representing the cross entropy loss.
If ``average=="batch"`` or ``average=="token"``, the returned loss is a scalar.
If ``average is None``, the returned loss is a vector of shape (batch_size,).
"""
if average not in {None, "token", "batch"}:
raise ValueError("Got average f{average}, expected one of "
"None, 'token', or 'batch'")
# shape : (batch * sequence_length, num_classes)
logits_flat = logits.view(-1, logits.size(-1))
# shape : (batch * sequence_length, num_classes)
log_probs_flat = torch.nn.functional.log_softmax(logits_flat, dim=-1)
# shape : (batch * max_len, 1)
targets_flat = targets.view(-1, 1).long()
if label_smoothing is not None and label_smoothing > 0.0:
num_classes = logits.size(-1)
smoothing_value = label_smoothing / num_classes
# Fill all the correct indices with 1 - smoothing value.
one_hot_targets = torch.zeros_like(log_probs_flat).scatter_(-1, targets_flat, 1.0 - label_smoothing)
smoothed_targets = one_hot_targets + smoothing_value
negative_log_likelihood_flat = - log_probs_flat * smoothed_targets
negative_log_likelihood_flat = negative_log_likelihood_flat.sum(-1, keepdim=True)
else:
# Contribution to the negative log likelihood only comes from the exact indices
# of the targets, as the target distributions are one-hot. Here we use torch.gather
# to extract the indices of the num_classes dimension which contribute to the loss.
# shape : (batch * sequence_length, 1)
negative_log_likelihood_flat = - torch.gather(log_probs_flat, dim=1, index=targets_flat)
# shape : (batch, sequence_length)
negative_log_likelihood = negative_log_likelihood_flat.view(*targets.size())
# shape : (batch, sequence_length)
negative_log_likelihood = negative_log_likelihood * weights.float()
if average == "batch":
# shape : (batch_size,)
per_batch_loss = negative_log_likelihood.sum(1) / (weights.sum(1).float() + 1e-13)
num_non_empty_sequences = ((weights.sum(1) > 0).float().sum() + 1e-13)
return per_batch_loss.sum() / num_non_empty_sequences
elif average == "token":
return negative_log_likelihood.sum() / (weights.sum().float() + 1e-13)
else:
# shape : (batch_size,)
per_batch_loss = negative_log_likelihood.sum(1) / (weights.sum(1).float() + 1e-13)
return per_batch_loss
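# Example usage (illustrative sketch, assumed shapes): padded positions get weight 0
# and therefore contribute nothing to the loss.
#   logits = torch.randn(2, 4, 5)           # (batch_size, sequence_length, num_classes)
#   targets = torch.randint(0, 5, (2, 4))   # (batch_size, sequence_length)
#   weights = torch.tensor([[1., 1., 1., 0.], [1., 1., 0., 0.]])
#   loss = sequence_cross_entropy_with_logits(logits, targets, weights)  # scalar tensor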
def replace_masked_values(tensor: torch.Tensor, mask: torch.Tensor, replace_with: float) -> torch.Tensor:
"""
Replaces all masked values in ``tensor`` with ``replace_with``. ``mask`` must be broadcastable
to the same shape as ``tensor``. We require that ``tensor.dim() == mask.dim()``, as otherwise we
won't know which dimensions of the mask to unsqueeze.
This just does ``tensor.masked_fill()``, except the pytorch method fills in things with a mask
value of 1, where we want the opposite. You can do this in your own code with
``tensor.masked_fill((1 - mask).byte(), replace_with)``.
"""
if tensor.dim() != mask.dim():
raise ConfigurationError("tensor.dim() (%d) != mask.dim() (%d)" % (tensor.dim(), mask.dim()))
return tensor.masked_fill((1 - mask).byte(), replace_with)
def tensors_equal(tensor1: torch.Tensor, tensor2: torch.Tensor, tolerance: float = 1e-12) -> bool:
"""
A check for tensor equality (by value). We make sure that the tensors have the same shape,
then check all of the entries in the tensor for equality. We additionally allow the input
tensors to be lists or dictionaries, where we then do the above check on every position in the
list / item in the dictionary. If we find objects that aren't tensors as we're doing that, we
just defer to their equality check.
This is kind of a catch-all method that's designed to make implementing ``__eq__`` methods
easier, in a way that's really only intended to be useful for tests.
"""
# pylint: disable=too-many-return-statements
if isinstance(tensor1, (list, tuple)):
if not isinstance(tensor2, (list, tuple)) or len(tensor1) != len(tensor2):
return False
return all([tensors_equal(t1, t2, tolerance) for t1, t2 in zip(tensor1, tensor2)])
elif isinstance(tensor1, dict):
if not isinstance(tensor2, dict):
return False
if tensor1.keys() != tensor2.keys():
return False
return all([tensors_equal(tensor1[key], tensor2[key], tolerance) for key in tensor1])
elif isinstance(tensor1, torch.Tensor):
if not isinstance(tensor2, torch.Tensor):
return False
if tensor1.size() != tensor2.size():
return False
return ((tensor1 - tensor2).abs().float() < tolerance).all()
else:
try:
return tensor1 == tensor2
except RuntimeError:
print(type(tensor1), type(tensor2))
raise
def device_mapping(cuda_device: int):
"""
In order to `torch.load()` a GPU-trained model onto a CPU (or specific GPU),
you have to supply a `map_location` function. Call this with
the desired `cuda_device` to get the function that `torch.load()` needs.
"""
def inner_device_mapping(storage: torch.Storage, location) -> torch.Storage: # pylint: disable=unused-argument
if cuda_device >= 0:
return storage.cuda(cuda_device)
else:
return storage
return inner_device_mapping
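# Example usage (illustrative sketch; "model.th" is a placeholder path): load a
# checkpoint that was saved on a GPU onto the CPU.
#   state_dict = torch.load("model.th", map_location=device_mapping(-1))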
def combine_tensors(combination: str, tensors: List[torch.Tensor]) -> torch.Tensor:
"""
Combines a list of tensors using element-wise operations and concatenation, specified by a
``combination`` string. The string refers to (1-indexed) positions in the input tensor list,
and looks like ``"1,2,1+2,3-1"``.
We allow the following kinds of combinations: ``x``, ``x*y``, ``x+y``, ``x-y``, and ``x/y``,
where ``x`` and ``y`` are positive integers less than or equal to ``len(tensors)``. Each of
the binary operations is performed elementwise. You can give as many combinations as you want
in the ``combination`` string. For example, for the input string ``"1,2,1*2"``, the result
would be ``[1;2;1*2]``, as you would expect, where ``[;]`` is concatenation along the last
dimension.
If you have a fixed, known way to combine tensors that you use in a model, you should probably
just use something like ``torch.cat([x_tensor, y_tensor, x_tensor * y_tensor])``. This
function adds some complexity that is only necessary if you want the specific combination used
to be `configurable`.
If you want to do any element-wise operations, the tensors involved in each element-wise
operation must have the same shape.
This function also accepts ``x`` and ``y`` in place of ``1`` and ``2`` in the combination
string.
"""
if len(tensors) > 9:
raise ConfigurationError("Double-digit tensor lists not currently supported")
combination = combination.replace('x', '1').replace('y', '2')
to_concatenate = [_get_combination(piece, tensors) for piece in combination.split(',')]
return torch.cat(to_concatenate, dim=-1)
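# Example usage (illustrative sketch, assumed values): concatenate x, y and their
# elementwise product along the last dimension.
#   x = torch.ones(2, 3)
#   y = 2 * torch.ones(2, 3)
#   combine_tensors("x,y,x*y", [x, y]).shape  # -> torch.Size([2, 9])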
def _rindex(sequence: Sequence[T], obj: T) -> int:
"""
Return zero-based index in the sequence of the last item whose value is equal to obj. Raises a
ValueError if there is no such item.
Parameters
----------
sequence : ``Sequence[T]``
obj : ``T``
Returns
-------
zero-based index associated to the position of the last item equal to obj
"""
for i in range(len(sequence) - 1, -1, -1):
if sequence[i] == obj:
return i
raise ValueError(f"Unable to find {obj} in sequence {sequence}.")
def _get_combination(combination: str, tensors: List[torch.Tensor]) -> torch.Tensor:
if combination.isdigit():
index = int(combination) - 1
return tensors[index]
else:
if len(combination) != 3:
raise ConfigurationError("Invalid combination: " + combination)
first_tensor = _get_combination(combination[0], tensors)
second_tensor = _get_combination(combination[2], tensors)
operation = combination[1]
if operation == '*':
return first_tensor * second_tensor
elif operation == '/':
return first_tensor / second_tensor
elif operation == '+':
return first_tensor + second_tensor
elif operation == '-':
return first_tensor - second_tensor
else:
raise ConfigurationError("Invalid operation: " + operation)
def combine_tensors_and_multiply(combination: str,
tensors: List[torch.Tensor],
weights: torch.nn.Parameter) -> torch.Tensor:
"""
Like :func:`combine_tensors`, but does a weighted (linear) multiplication while combining.
This is a separate function from ``combine_tensors`` because we try to avoid instantiating
large intermediate tensors during the combination, which is possible because we know that we're
going to be multiplying by a weight vector in the end.
Parameters
----------
combination : ``str``
Same as in :func:`combine_tensors`
tensors : ``List[torch.Tensor]``
A list of tensors to combine, where the integers in the ``combination`` are (1-indexed)
positions in this list of tensors. These tensors are all expected to have either three or
four dimensions, with the final dimension being an embedding. If there are four
dimensions, one of them must have length 1.
weights : ``torch.nn.Parameter``
A vector of weights to use for the combinations. This should have shape (combined_dim,),
as calculated by :func:`get_combined_dim`.
"""
if len(tensors) > 9:
raise ConfigurationError("Double-digit tensor lists not currently supported")
combination = combination.replace('x', '1').replace('y', '2')
pieces = combination.split(',')
tensor_dims = [tensor.size(-1) for tensor in tensors]
combination_dims = [_get_combination_dim(piece, tensor_dims) for piece in pieces]
dims_so_far = 0
to_sum = []
for piece, combination_dim in zip(pieces, combination_dims):
weight = weights[dims_so_far:(dims_so_far + combination_dim)]
dims_so_far += combination_dim
to_sum.append(_get_combination_and_multiply(piece, tensors, weight))
result = to_sum[0]
for result_piece in to_sum[1:]:
result = result + result_piece
return result
def _get_combination_and_multiply(combination: str,
tensors: List[torch.Tensor],
weight: torch.nn.Parameter) -> torch.Tensor:
if combination.isdigit():
index = int(combination) - 1
return torch.matmul(tensors[index], weight)
else:
if len(combination) != 3:
raise ConfigurationError("Invalid combination: " + combination)
first_tensor = _get_combination(combination[0], tensors)
second_tensor = _get_combination(combination[2], tensors)
operation = combination[1]
if operation == '*':
if first_tensor.dim() > 4 or second_tensor.dim() > 4:
raise ValueError("Tensors with dim > 4 not currently supported")
desired_dim = max(first_tensor.dim(), second_tensor.dim()) - 1
if first_tensor.dim() == 4:
expanded_dim = _rindex(first_tensor.size(), 1)
first_tensor = first_tensor.squeeze(expanded_dim)
if second_tensor.dim() == 4:
expanded_dim = _rindex(second_tensor.size(), 1)
second_tensor = second_tensor.squeeze(expanded_dim)
intermediate = first_tensor * weight
result = torch.matmul(intermediate, second_tensor.transpose(-1, -2))
if result.dim() == desired_dim + 1:
result = result.squeeze(-1)
return result
elif operation == '/':
if first_tensor.dim() > 4 or second_tensor.dim() > 4:
raise ValueError("Tensors with dim > 4 not currently supported")
desired_dim = max(first_tensor.dim(), second_tensor.dim()) - 1
if first_tensor.dim() == 4:
expanded_dim = _rindex(first_tensor.size(), 1)
first_tensor = first_tensor.squeeze(expanded_dim)
if second_tensor.dim() == 4:
expanded_dim = _rindex(second_tensor.size(), 1)
second_tensor = second_tensor.squeeze(expanded_dim)
intermediate = first_tensor * weight
result = torch.matmul(intermediate, second_tensor.pow(-1).transpose(-1, -2))
if result.dim() == desired_dim + 1:
result = result.squeeze(-1)
return result
elif operation == '+':
return torch.matmul(first_tensor, weight) + torch.matmul(second_tensor, weight)
elif operation == '-':
return torch.matmul(first_tensor, weight) - torch.matmul(second_tensor, weight)
else:
raise ConfigurationError("Invalid operation: " + operation)
def get_combined_dim(combination: str, tensor_dims: List[int]) -> int:
"""
For use with :func:`combine_tensors`. This function computes the resultant dimension when
calling ``combine_tensors(combination, tensors)``, when the tensor dimension is known. This is
necessary for knowing the sizes of weight matrices when building models that use
``combine_tensors``.
Parameters
----------
combination : ``str``
A comma-separated list of combination pieces, like ``"1,2,1*2"``, specified identically to
``combination`` in :func:`combine_tensors`.
tensor_dims : ``List[int]``
A list of tensor dimensions, where each dimension is from the `last axis` of the tensors
that will be input to :func:`combine_tensors`.
"""
if len(tensor_dims) > 9:
raise ConfigurationError("Double-digit tensor lists not currently supported")
combination = combination.replace('x', '1').replace('y', '2')
return sum([_get_combination_dim(piece, tensor_dims) for piece in combination.split(',')])
def _get_combination_dim(combination: str, tensor_dims: List[int]) -> int:
if combination.isdigit():
index = int(combination) - 1
return tensor_dims[index]
else:
if len(combination) != 3:
raise ConfigurationError("Invalid combination: " + combination)
first_tensor_dim = _get_combination_dim(combination[0], tensor_dims)
second_tensor_dim = _get_combination_dim(combination[2], tensor_dims)
operation = combination[1]
if first_tensor_dim != second_tensor_dim:
raise ConfigurationError("Tensor dims must match for operation \"{}\"".format(operation))
return first_tensor_dim
def logsumexp(tensor: torch.Tensor,
dim: int = -1,
keepdim: bool = False) -> torch.Tensor:
"""
A numerically stable computation of logsumexp. This is mathematically equivalent to
    `tensor.exp().sum(dim, keepdim=keepdim).log()`. This function is typically used for summing log
probabilities.
Parameters
----------
tensor : torch.FloatTensor, required.
A tensor of arbitrary size.
dim : int, optional (default = -1)
The dimension of the tensor to apply the logsumexp to.
keepdim: bool, optional (default = False)
Whether to retain a dimension of size one at the dimension we reduce over.
"""
max_score, _ = tensor.max(dim, keepdim=keepdim)
if keepdim:
stable_vec = tensor - max_score
else:
stable_vec = tensor - max_score.unsqueeze(dim)
return max_score + (stable_vec.exp().sum(dim, keepdim=keepdim)).log()
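# Example usage (illustrative sketch, assumed values): summing probabilities in log space.
#   log_probs = torch.log(torch.tensor([0.25, 0.25, 0.5]))
#   logsumexp(log_probs)  # -> approximately tensor(0.), since the probabilities sum to 1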
def get_device_of(tensor: torch.Tensor) -> int:
"""
Returns the device of the tensor.
"""
if not tensor.is_cuda:
return -1
else:
return tensor.get_device()
def flatten_and_batch_shift_indices(indices: torch.Tensor,
sequence_length: int) -> torch.Tensor:
"""
This is a subroutine for :func:`~batched_index_select`. The given ``indices`` of size
``(batch_size, d_1, ..., d_n)`` indexes into dimension 2 of a target tensor, which has size
``(batch_size, sequence_length, embedding_size)``. This function returns a vector that
correctly indexes into the flattened target. The sequence length of the target must be
provided to compute the appropriate offsets.
.. code-block:: python
indices = torch.ones([2,3], dtype=torch.long)
# Sequence length of the target tensor.
sequence_length = 10
shifted_indices = flatten_and_batch_shift_indices(indices, sequence_length)
# Indices into the second element in the batch are correctly shifted
# to take into account that the target tensor will be flattened before
# the indices are applied.
assert shifted_indices == [1, 1, 1, 11, 11, 11]
Parameters
----------
indices : ``torch.LongTensor``, required.
sequence_length : ``int``, required.
The length of the sequence the indices index into.
This must be the second dimension of the tensor.
Returns
-------
offset_indices : ``torch.LongTensor``
"""
# Shape: (batch_size)
offsets = get_range_vector(indices.size(0), get_device_of(indices)) * sequence_length
for _ in range(len(indices.size()) - 1):
offsets = offsets.unsqueeze(1)
# Shape: (batch_size, d_1, ..., d_n)
offset_indices = indices + offsets
# Shape: (batch_size * d_1 * ... * d_n)
offset_indices = offset_indices.view(-1)
return offset_indices
def batched_index_select(target: torch.Tensor,
indices: torch.LongTensor,
flattened_indices: Optional[torch.LongTensor] = None) -> torch.Tensor:
"""
The given ``indices`` of size ``(batch_size, d_1, ..., d_n)`` indexes into the sequence
dimension (dimension 2) of the target, which has size ``(batch_size, sequence_length,
embedding_size)``.
This function returns selected values in the target with respect to the provided indices, which
have size ``(batch_size, d_1, ..., d_n, embedding_size)``. This can use the optionally
precomputed :func:`~flattened_indices` with size ``(batch_size * d_1 * ... * d_n)`` if given.
An example use case of this function is looking up the start and end indices of spans in a
sequence tensor. This is used in the
    :class:`~allennlp.models.coreference_resolution.CoreferenceResolver` model to select
contextual word representations corresponding to the start and end indices of mentions. The key
reason this can't be done with basic torch functions is that we want to be able to use look-up
tensors with an arbitrary number of dimensions (for example, in the coref model, we don't know
a-priori how many spans we are looking up).
Parameters
----------
target : ``torch.Tensor``, required.
A 3 dimensional tensor of shape (batch_size, sequence_length, embedding_size).
This is the tensor to be indexed.
indices : ``torch.LongTensor``
A tensor of shape (batch_size, ...), where each element is an index into the
``sequence_length`` dimension of the ``target`` tensor.
flattened_indices : Optional[torch.Tensor], optional (default = None)
An optional tensor representing the result of calling :func:~`flatten_and_batch_shift_indices`
on ``indices``. This is helpful in the case that the indices can be flattened once and
cached for many batch lookups.
Returns
-------
selected_targets : ``torch.Tensor``
A tensor with shape [indices.size(), target.size(-1)] representing the embedded indices
extracted from the batch flattened target tensor.
"""
if flattened_indices is None:
# Shape: (batch_size * d_1 * ... * d_n)
flattened_indices = flatten_and_batch_shift_indices(indices, target.size(1))
# Shape: (batch_size * sequence_length, embedding_size)
flattened_target = target.view(-1, target.size(-1))
# Shape: (batch_size * d_1 * ... * d_n, embedding_size)
flattened_selected = flattened_target.index_select(0, flattened_indices)
selected_shape = list(indices.size()) + [target.size(-1)]
# Shape: (batch_size, d_1, ..., d_n, embedding_size)
selected_targets = flattened_selected.view(*selected_shape)
return selected_targets
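# Example usage (illustrative sketch, assumed values): pick rows 0 and 2 from the first
# batch element and row 1 twice from the second.
#   target = torch.arange(12, dtype=torch.float).view(2, 3, 2)   # (batch_size, sequence_length, embedding_size)
#   indices = torch.tensor([[0, 2], [1, 1]])
#   batched_index_select(target, indices)
#   # -> tensor([[[0., 1.], [4., 5.]],
#   #            [[8., 9.], [8., 9.]]])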
def flattened_index_select(target: torch.Tensor,
indices: torch.LongTensor) -> torch.Tensor:
"""
The given ``indices`` of size ``(set_size, subset_size)`` specifies subsets of the ``target``
that each of the set_size rows should select. The `target` has size
``(batch_size, sequence_length, embedding_size)``, and the resulting selected tensor has size
``(batch_size, set_size, subset_size, embedding_size)``.
Parameters
----------
target : ``torch.Tensor``, required.
A Tensor of shape (batch_size, sequence_length, embedding_size).
indices : ``torch.LongTensor``, required.
A LongTensor of shape (set_size, subset_size). All indices must be < sequence_length
as this tensor is an index into the sequence_length dimension of the target.
Returns
-------
selected : ``torch.Tensor``, required.
A Tensor of shape (batch_size, set_size, subset_size, embedding_size).
"""
if indices.dim() != 2:
raise ConfigurationError("Indices passed to flattened_index_select had shape {} but "
"only 2 dimensional inputs are supported.".format(indices.size()))
# Shape: (batch_size, set_size * subset_size, embedding_size)
flattened_selected = target.index_select(1, indices.view(-1))
# Shape: (batch_size, set_size, subset_size, embedding_size)
selected = flattened_selected.view(target.size(0), indices.size(0), indices.size(1), -1)
return selected
def get_range_vector(size: int, device: int) -> torch.Tensor:
"""
Returns a range vector with the desired size, starting at 0. The CUDA implementation
    is meant to avoid copying data from CPU to GPU.
"""
if device > -1:
return torch.cuda.LongTensor(size, device=device).fill_(1).cumsum(0) - 1
else:
return torch.arange(0, size, dtype=torch.long)
def bucket_values(distances: torch.Tensor,
num_identity_buckets: int = 4,
num_total_buckets: int = 10) -> torch.Tensor:
"""
    Places the given values (designed for distances) into ``num_total_buckets`` semi-logscale
buckets, with ``num_identity_buckets`` of these capturing single values.
The default settings will bucket values into the following buckets:
[0, 1, 2, 3, 4, 5-7, 8-15, 16-31, 32-63, 64+].
Parameters
----------
distances : ``torch.Tensor``, required.
A Tensor of any size, to be bucketed.
num_identity_buckets: int, optional (default = 4).
The number of identity buckets (those only holding a single value).
num_total_buckets : int, (default = 10)
The total number of buckets to bucket values into.
Returns
-------
A tensor of the same shape as the input, containing the indices of the buckets
the values were placed in.
"""
# Chunk the values into semi-logscale buckets using .floor().
# This is a semi-logscale bucketing because we divide by log(2) after taking the log.
# We do this to make the buckets more granular in the initial range, where we expect
# most values to fall. We then add (num_identity_buckets - 1) because we want these indices
# to start _after_ the fixed number of buckets which we specified would only hold single values.
logspace_index = (distances.float().log() / math.log(2)).floor().long() + (num_identity_buckets - 1)
# create a mask for values which will go into single number buckets (i.e not a range).
use_identity_mask = (distances <= num_identity_buckets).long()
use_buckets_mask = 1 + (-1 * use_identity_mask)
# Use the original values if they are less than num_identity_buckets, otherwise
# use the logspace indices.
combined_index = use_identity_mask * distances + use_buckets_mask * logspace_index
# Clamp to put anything > num_total_buckets into the final bucket.
return combined_index.clamp(0, num_total_buckets - 1)
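# Example usage (illustrative sketch, assumed values): small distances keep their own
# identity bucket, larger ones fall into the log-scale ranges.
#   bucket_values(torch.tensor([1, 2, 5, 8, 70]))  # -> tensor([1, 2, 5, 6, 9])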
def add_sentence_boundary_token_ids(tensor: torch.Tensor,
mask: torch.Tensor,
sentence_begin_token: Any,
sentence_end_token: Any) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Add begin/end of sentence tokens to the batch of sentences.
Given a batch of sentences with size ``(batch_size, timesteps)`` or
``(batch_size, timesteps, dim)`` this returns a tensor of shape
``(batch_size, timesteps + 2)`` or ``(batch_size, timesteps + 2, dim)`` respectively.
Returns both the new tensor and updated mask.
Parameters
----------
tensor : ``torch.Tensor``
A tensor of shape ``(batch_size, timesteps)`` or ``(batch_size, timesteps, dim)``
mask : ``torch.Tensor``
A tensor of shape ``(batch_size, timesteps)``
sentence_begin_token: Any (anything that can be broadcast in torch for assignment)
For 2D input, a scalar with the <S> id. For 3D input, a tensor with length dim.
sentence_end_token: Any (anything that can be broadcast in torch for assignment)
For 2D input, a scalar with the </S> id. For 3D input, a tensor with length dim.
Returns
-------
tensor_with_boundary_tokens : ``torch.Tensor``
The tensor with the appended and prepended boundary tokens. If the input was 2D,
it has shape (batch_size, timesteps + 2) and if the input was 3D, it has shape
(batch_size, timesteps + 2, dim).
new_mask : ``torch.Tensor``
The new mask for the tensor, taking into account the appended tokens
marking the beginning and end of the sentence.
"""
# TODO: matthewp, profile this transfer
sequence_lengths = mask.sum(dim=1).detach().cpu().numpy()
tensor_shape = list(tensor.data.shape)
new_shape = list(tensor_shape)
new_shape[1] = tensor_shape[1] + 2
tensor_with_boundary_tokens = tensor.new_zeros(*new_shape)
if len(tensor_shape) == 2:
tensor_with_boundary_tokens[:, 1:-1] = tensor
tensor_with_boundary_tokens[:, 0] = sentence_begin_token
for i, j in enumerate(sequence_lengths):
tensor_with_boundary_tokens[i, j + 1] = sentence_end_token
new_mask = (tensor_with_boundary_tokens != 0).long()
elif len(tensor_shape) == 3:
tensor_with_boundary_tokens[:, 1:-1, :] = tensor
for i, j in enumerate(sequence_lengths):
tensor_with_boundary_tokens[i, 0, :] = sentence_begin_token
tensor_with_boundary_tokens[i, j + 1, :] = sentence_end_token
new_mask = ((tensor_with_boundary_tokens > 0).long().sum(dim=-1) > 0).long()
else:
raise ValueError("add_sentence_boundary_token_ids only accepts 2D and 3D input")
return tensor_with_boundary_tokens, new_mask
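# Example usage (illustrative sketch, assumed values): token ids 1 and 2 stand in for
# the <S> and </S> ids; the trailing 0 is padding.
#   tensor = torch.tensor([[7, 8, 0]])
#   mask = torch.tensor([[1, 1, 0]])
#   add_sentence_boundary_token_ids(tensor, mask, 1, 2)
#   # -> (tensor([[1, 7, 8, 2, 0]]), tensor([[1, 1, 1, 1, 0]]))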
def remove_sentence_boundaries(tensor: torch.Tensor,
mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Remove begin/end of sentence embeddings from the batch of sentences.
Given a batch of sentences with size ``(batch_size, timesteps, dim)``
this returns a tensor of shape ``(batch_size, timesteps - 2, dim)`` after removing
the beginning and end sentence markers. The sentences are assumed to be padded on the right,
with the beginning of each sentence assumed to occur at index 0 (i.e., ``mask[:, 0]`` is assumed
to be 1).
Returns both the new tensor and updated mask.
This function is the inverse of ``add_sentence_boundary_token_ids``.
Parameters
----------
tensor : ``torch.Tensor``
A tensor of shape ``(batch_size, timesteps, dim)``
mask : ``torch.Tensor``
A tensor of shape ``(batch_size, timesteps)``
Returns
-------
tensor_without_boundary_tokens : ``torch.Tensor``
The tensor after removing the boundary tokens of shape ``(batch_size, timesteps - 2, dim)``
new_mask : ``torch.Tensor``
The new mask for the tensor of shape ``(batch_size, timesteps - 2)``.
"""
# TODO: matthewp, profile this transfer
sequence_lengths = mask.sum(dim=1).detach().cpu().numpy()
tensor_shape = list(tensor.data.shape)
new_shape = list(tensor_shape)
new_shape[1] = tensor_shape[1] - 2
tensor_without_boundary_tokens = tensor.new_zeros(*new_shape)
new_mask = tensor.new_zeros((new_shape[0], new_shape[1]), dtype=torch.long)
for i, j in enumerate(sequence_lengths):
if j > 2:
tensor_without_boundary_tokens[i, :(j - 2), :] = tensor[i, 1:(j - 1), :]
new_mask[i, :(j - 2)] = 1
return tensor_without_boundary_tokens, new_mask
def add_positional_features(tensor: torch.Tensor,
min_timescale: float = 1.0,
max_timescale: float = 1.0e4):
# pylint: disable=line-too-long
"""
Implements the frequency-based positional encoding described
in `Attention is all you Need
<https://www.semanticscholar.org/paper/Attention-Is-All-You-Need-Vaswani-Shazeer/0737da0767d77606169cbf4187b83e1ab62f6077>`_ .
Adds sinusoids of different frequencies to a ``Tensor``. A sinusoid of a
different frequency and phase is added to each dimension of the input ``Tensor``.
This allows the attention heads to use absolute and relative positions.
The number of timescales is equal to hidden_dim / 2 within the range
(min_timescale, max_timescale). For each timescale, the two sinusoidal
signals sin(timestep / timescale) and cos(timestep / timescale) are
generated and concatenated along the hidden_dim dimension.
Parameters
----------
tensor : ``torch.Tensor``
a Tensor with shape (batch_size, timesteps, hidden_dim).
min_timescale : ``float``, optional (default = 1.0)
The smallest timescale to use.
max_timescale : ``float``, optional (default = 1.0e4)
The largest timescale to use.
Returns
-------
The input tensor augmented with the sinusoidal frequencies.
"""
_, timesteps, hidden_dim = tensor.size()
timestep_range = get_range_vector(timesteps, get_device_of(tensor)).data.float()
# We're generating both cos and sin frequencies,
# so half for each.
num_timescales = hidden_dim // 2
timescale_range = get_range_vector(num_timescales, get_device_of(tensor)).data.float()
log_timescale_increments = math.log(float(max_timescale) / float(min_timescale)) / float(num_timescales - 1)
inverse_timescales = min_timescale * torch.exp(timescale_range * -log_timescale_increments)
# Broadcasted multiplication - shape (timesteps, num_timescales)
scaled_time = timestep_range.unsqueeze(1) * inverse_timescales.unsqueeze(0)
# shape (timesteps, 2 * num_timescales)
sinusoids = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 1)
if hidden_dim % 2 != 0:
# if the number of dimensions is odd, the cos and sin
# timescales had size (hidden_dim - 1) / 2, so we need
# to add a row of zeros to make up the difference.
sinusoids = torch.cat([sinusoids, sinusoids.new_zeros(timesteps, 1)], 1)
return tensor + sinusoids.unsqueeze(0)
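# Example usage (illustrative sketch, assumed values): at position 0 the sin terms are
# all 0 and the cos terms are all 1.
#   x = torch.zeros(1, 5, 8)                 # (batch_size, timesteps, hidden_dim)
#   add_positional_features(x)[0, 0]         # -> tensor([0., 0., 0., 0., 1., 1., 1., 1.])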
def clone(module: torch.nn.Module, num_copies: int) -> torch.nn.ModuleList:
"""Produce N identical layers."""
return torch.nn.ModuleList([copy.deepcopy(module) for _ in range(num_copies)])
def combine_initial_dims(tensor: torch.Tensor) -> torch.Tensor:
"""
Given a (possibly higher order) tensor of ids with shape
(d1, ..., dn, sequence_length)
Return a view that's (d1 * ... * dn, sequence_length).
If original tensor is 1-d or 2-d, return it as is.
"""
if tensor.dim() <= 2:
return tensor
else:
return tensor.view(-1, tensor.size(-1))
def uncombine_initial_dims(tensor: torch.Tensor, original_size: torch.Size) -> torch.Tensor:
"""
Given a tensor of embeddings with shape
(d1 * ... * dn, sequence_length, embedding_dim)
and the original shape
(d1, ..., dn, sequence_length),
return the reshaped tensor of embeddings with shape
(d1, ..., dn, sequence_length, embedding_dim).
If original size is 1-d or 2-d, return it as is.
"""
if len(original_size) <= 2:
return tensor
else:
view_args = list(original_size) + [tensor.size(-1)]
return tensor.view(*view_args)
| [
"torch.zeros",
"torch.cos",
"torch.cat",
"torch.stack",
"torch.arange",
"torch.max",
"torch.gather",
"torch.nn.utils.rnn.pad_sequence",
"torch.sin",
"torch.cuda.LongTensor",
"torch.nn.functional.log_softmax",
"torch.nn.functional.softmax",
"torch.zeros_like",
"torch.matmul",
"torch.exp",
"torch.flip",
"torch.sum"
] | 0.4.1 | shellshock1911/allennlp | 79e2cf7b677f84daefaf63acc2a8e3833782de23 |