version (string, 25 classes) | code (string, 75–178k chars) | apis (sequence) | full_version (string, 1–6 chars) | repo_name (string, 9–78 chars) | hexsha (string, 40 chars)
---|---|---|---|---|---
1.7 | import logging
from typing import Optional, Union
import numpy as np
import pandas as pd
import torch
from anndata import AnnData
from pytorch_lightning.callbacks import Callback
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
import scvi
from scvi import _CONSTANTS
from scvi.data import register_tensor_from_anndata
from scvi.dataloaders import DataSplitter
from scvi.external.cellassign._module import CellAssignModule
from scvi.lightning import TrainingPlan
from scvi.model.base import BaseModelClass, TrainRunner, UnsupervisedTrainingMixin
logger = logging.getLogger(__name__)
B = 10
class CellAssign(UnsupervisedTrainingMixin, BaseModelClass):
"""
Reimplementation of CellAssign for reference-based annotation [Zhang19]_.
Parameters
----------
adata
single-cell AnnData object that has been registered via :func:`~scvi.data.setup_anndata`.
The object should be subset to contain the same genes as the cell type marker dataframe.
cell_type_markers
Binary marker gene DataFrame of genes by cell types. Gene names corresponding to `adata.var_names`
should be in DataFrame index, and cell type labels should be the columns.
size_factor_key
Key in `adata.obs` with continuous valued size factors.
**model_kwargs
Keyword args for :class:`~scvi.external.cellassign.CellAssignModule`
Examples
--------
>>> adata = scvi.data.read_h5ad(path_to_anndata)
>>> marker_gene_mat = pd.read_csv(path_to_marker_gene_csv)
>>> bdata = adata[:, adata.var.index.isin(marker_gene_mat.index)].copy()
>>> scvi.data.setup_anndata(bdata)
>>> model = CellAssign(bdata, marker_gene_mat, size_factor_key='S')
>>> model.train()
>>> predictions = model.predict()
"""
def __init__(
self,
adata: AnnData,
cell_type_markers: pd.DataFrame,
size_factor_key: str,
**model_kwargs,
):
try:
cell_type_markers = cell_type_markers.loc[adata.var_names]
except KeyError:
raise KeyError(
"Anndata and cell type markers do not contain the same genes."
)
super().__init__(adata)
register_tensor_from_anndata(adata, "_size_factor", "obs", size_factor_key)
self.n_genes = self.summary_stats["n_vars"]
self.cell_type_markers = cell_type_markers
rho = torch.Tensor(cell_type_markers.to_numpy())
n_cats_per_cov = (
self.scvi_setup_dict_["extra_categoricals"]["n_cats_per_key"]
if "extra_categoricals" in self.scvi_setup_dict_
else None
)
x = scvi.data.get_from_registry(adata, _CONSTANTS.X_KEY)
col_means = np.asarray(np.mean(x, 0)).ravel() # (g)
col_means_mu, col_means_std = np.mean(col_means), np.std(col_means)
col_means_normalized = torch.Tensor((col_means - col_means_mu) / col_means_std)
# compute basis means for phi - shape (B)
basis_means = np.linspace(np.min(x), np.max(x), B) # (B)
self.module = CellAssignModule(
n_genes=self.n_genes,
rho=rho,
basis_means=basis_means,
b_g_0=col_means_normalized,
n_batch=self.summary_stats["n_batch"],
n_cats_per_cov=n_cats_per_cov,
n_continuous_cov=self.summary_stats["n_continuous_covs"],
**model_kwargs,
)
self._model_summary_string = (
"CellAssign Model with params: \nn_genes: {}, n_labels: {}"
).format(
self.n_genes,
rho.shape[1],
)
self.init_params_ = self._get_init_params(locals())
@torch.no_grad()
def predict(self) -> pd.DataFrame:
"""Predict soft cell type assignment probability for each cell."""
adata = self._validate_anndata(None)
scdl = self._make_data_loader(adata=adata)
predictions = []
for tensors in scdl:
generative_inputs = self.module._get_generative_input(tensors, None)
outputs = self.module.generative(**generative_inputs)
gamma = outputs["gamma"]
predictions += [gamma.cpu()]
return pd.DataFrame(
np.array(torch.cat(predictions)), columns=self.cell_type_markers.columns
)
def train(
self,
max_epochs: int = 400,
lr: float = 3e-3,
use_gpu: Optional[Union[str, int, bool]] = None,
train_size: float = 0.9,
validation_size: Optional[float] = None,
batch_size: int = 1024,
plan_kwargs: Optional[dict] = None,
early_stopping: bool = True,
early_stopping_patience: int = 15,
early_stopping_min_delta: float = 0.0,
**kwargs,
):
"""
Trains the model.
Parameters
----------
max_epochs
Number of epochs to train for
lr
Learning rate for optimization.
use_gpu
Use default GPU if available (if None or True), or index of GPU to use (if int),
or name of GPU (if str), or use CPU (if False).
train_size
Size of training set in the range [0.0, 1.0].
validation_size
Size of the validation set. If `None`, defaults to 1 - `train_size`. If
`train_size + validation_size < 1`, the remaining cells belong to a test set.
batch_size
Minibatch size to use during training.
plan_kwargs
Keyword args for :class:`~scvi.lightning.TrainingPlan`. Keyword arguments passed to
`train()` will overwrite values present in `plan_kwargs`, when appropriate.
early_stopping
Adds a callback for early stopping on the validation ELBO (`elbo_validation`)
early_stopping_patience
Number of epochs the early stopping metric may fail to improve by more than
early_stopping_min_delta before training stops
early_stopping_min_delta
Threshold for counting an epoch towards patience
**kwargs
Other keyword args for :class:`~scvi.lightning.Trainer`.
"""
update_dict = {"lr": lr, "weight_decay": 1e-10}
if plan_kwargs is not None:
plan_kwargs.update(update_dict)
else:
plan_kwargs = update_dict
if "callbacks" in kwargs:
kwargs["callbacks"] += [ClampCallback()]
else:
kwargs["callbacks"] = [ClampCallback()]
if early_stopping:
early_stopping_callback = [
EarlyStopping(
monitor="elbo_validation",
min_delta=early_stopping_min_delta,
patience=early_stopping_patience,
mode="min",
)
]
if "callbacks" in kwargs:
kwargs["callbacks"] += early_stopping_callback
else:
kwargs["callbacks"] = early_stopping_callback
kwargs["check_val_every_n_epoch"] = 1
if max_epochs is None:
n_cells = self.adata.n_obs
max_epochs = np.min([round((20000 / n_cells) * 400), 400])
plan_kwargs = plan_kwargs if isinstance(plan_kwargs, dict) else dict()
data_splitter = DataSplitter(
self.adata,
train_size=train_size,
validation_size=validation_size,
batch_size=batch_size,
use_gpu=use_gpu,
)
training_plan = TrainingPlan(
self.module, len(data_splitter.train_idx), **plan_kwargs
)
runner = TrainRunner(
self,
training_plan=training_plan,
data_splitter=data_splitter,
max_epochs=max_epochs,
use_gpu=use_gpu,
**kwargs,
)
return runner()
class ClampCallback(Callback):
def __init__(self):
super().__init__()
def on_batch_end(self, trainer, pl_module):
with torch.no_grad():
pl_module.module.delta_log.clamp_(np.log(pl_module.module.min_delta))
super().on_batch_end(trainer, pl_module)
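# Editor's note: a minimal end-to-end usage sketch (not part of the original file),
# following the class docstring above; the file paths and the 'S' size-factor column
# are illustrative assumptions.
if __name__ == "__main__":
    example_adata = scvi.data.read_h5ad("path_to_anndata.h5ad")
    marker_gene_mat = pd.read_csv("path_to_marker_gene_csv.csv", index_col=0)
    bdata = example_adata[:, example_adata.var.index.isin(marker_gene_mat.index)].copy()
    scvi.data.setup_anndata(bdata)
    model = CellAssign(bdata, marker_gene_mat, size_factor_key="S")
    model.train()
    probs = model.predict()             # soft assignments: cells x cell types
    print(probs.idxmax(axis=1).head())  # hard label = most probable cell type per cell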
| [
"torch.cat",
"torch.no_grad",
"torch.Tensor"
] | 1.7.1 | giovp/scvi-tools | 9b9370aa502b308f84e3129a7c940a9bea06426b |
1.2 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets,models,transforms
from PIL import Image
import argparse
from deeprobust.image.attack.fgsm import FGSM
from deeprobust.image.netmodels.CNN import Net
from deeprobust.image.config import attack_params
from deeprobust.image.utils import download_model
def parameter_parser():
parser = argparse.ArgumentParser(description = "Run attack algorithms.")
parser.add_argument("--destination",
default = './trained_models/',
help = "choose destination to load the pretrained models.")
parser.add_argument("--filename",
default = "MNIST_CNN_epoch_20.pt")
return parser.parse_args()
args = parameter_parser() # read arguments and create an argparse object
model = Net()
model.load_state_dict(torch.load(args.destination + args.filename))
model = model.to('cuda')  # move the model to the same device as the inputs below
model.eval()
print("Finish loading network.")
xx = datasets.MNIST('./', download = False).data[999:1000].to('cuda')
xx = xx.unsqueeze_(1).float()/255
#print(xx.size())
## Set Target
yy = datasets.MNIST('./', download = False).targets[999:1000].to('cuda')
"""
Generate adversarial examples
"""
F1 = FGSM(model, device = "cuda") ### or "cpu"
AdvExArray = F1.generate(xx, yy, **attack_params['FGSM_MNIST'])
predict0 = model(xx)
predict0= predict0.argmax(dim=1, keepdim=True)
predict1 = model(AdvExArray)
predict1= predict1.argmax(dim=1, keepdim=True)
print("original prediction:")
print(predict0)
print("attack prediction:")
print(predict1)
xx = xx.cpu().detach().numpy()
AdvExArray = AdvExArray.cpu().detach().numpy()
import matplotlib.pyplot as plt
plt.imshow(xx[0,0]*255,cmap='gray',vmin=0,vmax=255)
plt.savefig('./adversary_examples/mnist_advexample_fgsm_ori.png')
plt.imshow(AdvExArray[0,0]*255,cmap='gray',vmin=0,vmax=255)
plt.savefig('./adversary_examples/mnist_advexample_fgsm_adv.png')
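# Editor's note: a small hedged check added for illustration (not in the original script);
# it reports the maximum L-infinity perturbation, which should stay within the epsilon
# configured in attack_params['FGSM_MNIST'].
print("max L-inf perturbation:", np.abs(AdvExArray - xx).max())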
| [
"torch.load"
] | 1.2.0 | marblet/DeepRobust | 276a7048aded2cf3a190d3851ffd4587b7d1dd49 |
# These models are downloaded via the repo https://github.com/Cadene/pretrained-models.pytorch
#See licence here: https://github.com/Cadene/pretrained-models.pytorch/blob/master/LICENSE.txt
from torch import nn
from ..learner import model_meta
from ...core import *
pretrainedmodels = try_import('pretrainedmodels')
if not pretrainedmodels:
raise Exception('Error: `pretrainedmodels` is needed. `pip install pretrainedmodels`')
__all__ = ['inceptionv4', 'inceptionresnetv2', 'nasnetamobile', 'dpn92', 'xception_cadene', 'se_resnet50',
'se_resnet101', 'se_resnext50_32x4d', 'se_resnext101_32x4d', 'senet154', 'pnasnet5large']
def get_model(model_name:str, pretrained:bool, seq:bool=False, pname:str='imagenet', **kwargs):
pretrained = pname if pretrained else None
model = getattr(pretrainedmodels, model_name)(pretrained=pretrained, **kwargs)
return nn.Sequential(*model.children()) if seq else model
def inceptionv4(pretrained:bool=False):
model = get_model('inceptionv4', pretrained)
all_layers = list(model.children())
return nn.Sequential(*all_layers[0], *all_layers[1:])
model_meta[inceptionv4] = {'cut': -2, 'split': lambda m: (m[0][11], m[1])}
def nasnetamobile(pretrained:bool=False):
model = get_model('nasnetamobile', pretrained, num_classes=1000)
model.logits = noop
return nn.Sequential(model)
model_meta[nasnetamobile] = {'cut': noop, 'split': lambda m: (list(m[0][0].children())[8], m[1])}
def pnasnet5large(pretrained:bool=False):
model = get_model('pnasnet5large', pretrained, num_classes=1000)
model.logits = noop
return nn.Sequential(model)
model_meta[pnasnet5large] = {'cut': noop, 'split': lambda m: (list(m[0][0].children())[8], m[1])}
def inceptionresnetv2(pretrained:bool=False): return get_model('inceptionresnetv2', pretrained, seq=True)
def dpn92(pretrained:bool=False): return get_model('dpn92', pretrained, pname='imagenet+5k', seq=True)
def xception_cadene(pretrained=False): return get_model('xception', pretrained, seq=True)
def se_resnet50(pretrained:bool=False): return get_model('se_resnet50', pretrained)
def se_resnet101(pretrained:bool=False): return get_model('se_resnet101', pretrained)
def se_resnext50_32x4d(pretrained:bool=False): return get_model('se_resnext50_32x4d', pretrained)
def se_resnext101_32x4d(pretrained:bool=False): return get_model('se_resnext101_32x4d', pretrained)
def senet154(pretrained:bool=False): return get_model('senet154', pretrained)
model_meta[inceptionresnetv2] = {'cut': -2, 'split': lambda m: (m[0][9], m[1])}
model_meta[dpn92] = {'cut': -1, 'split': lambda m: (m[0][0][16], m[1])}
model_meta[xception_cadene] = {'cut': -1, 'split': lambda m: (m[0][11], m[1])}
model_meta[senet154] = {'cut': -3, 'split': lambda m: (m[0][3], m[1])}
_se_resnet_meta = {'cut': -2, 'split': lambda m: (m[0][3], m[1])}
model_meta[se_resnet50] = _se_resnet_meta
model_meta[se_resnet101] = _se_resnet_meta
model_meta[se_resnext50_32x4d] = _se_resnet_meta
model_meta[se_resnext101_32x4d] = _se_resnet_meta
# TODO: add "resnext101_32x4d" "resnext101_64x4d" after serialization issue is fixed:
# https://github.com/Cadene/pretrained-models.pytorch/pull/128
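# Editor's note: a minimal hedged sketch (not part of the original module) showing how one
# of these wrappers can be instantiated directly; pretrained=False avoids downloading
# weights, and the chosen architecture is only an example.
if __name__ == "__main__":
    m = se_resnet50(pretrained=False)
    print(type(m).__name__, sum(p.numel() for p in m.parameters()), "parameters")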
| [
"torch.nn.Sequential"
] | 1.0.0 | JiahuaWU/fastai | 13a2df812d875abf0558004283392ab40d9bdea1 |
1.9 | import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np
class NumpyDataset(Dataset):
def __init__(self, data_path, labels_path, transform, three_channels=False):
"""
Loads image NumPy arrays and labels NumPy arrays and applies transforms to them in a memory-efficient manner.
Arguments:
----------
data_path: str
Path to image data of shape [number_of_images, 1, height, width].
labels_path: str
Path to label data of shape [number_of_labels, 1].
transform: Torchvision transforms
Augmentations to be applied on the data.
three_channels: bool
If True the one-channel image is copied over the other channels, creating a three-channel image.
"""
self.data = np.load(data_path, mmap_mode='r')
self.labels = np.load(labels_path, mmap_mode='r')
self.transform = transform
self.three_channels = three_channels
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
data = self.data[idx]
if self.three_channels:
data = np.tile(data, (3, 1, 1)) # repeat the single channel to form a 3-channel image
data = data.astype(np.float32)
data = self.transform(torch.from_numpy(data))
label = torch.tensor(self.labels[idx], dtype=torch.long)
return data, label
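# Editor's note: a hedged usage sketch added for illustration (not part of the original
# module); the .npy paths and the Normalize transform are illustrative assumptions.
def _numpy_dataset_example():
    from torchvision import transforms
    aug = transforms.Compose([transforms.Normalize(mean=[0.5], std=[0.5])])
    dataset = NumpyDataset("images.npy", "labels.npy", aug, three_channels=True)
    x, y = dataset[0]
    print(x.shape, y)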
def get_dataloader(data_path, labels_path, augmentations, bs=100, three_channels=False, num_workers=2, shuffle=False):
"""
Creates PyTorch DataLoaders that load image NumPy arrays and labels NumPy arrays and applies transforms to them in a memory-efficient manner.
Arguments:
----------
data_path: str
Path to image data of shape [number_of_images, 1, height, width].
labels_path: str
Path to label data of shape [number_of_labels, 1].
augmentations: Torchvision transforms
Augmentations to be applied on the data.
bs: int
Batch size for loading the data.
three_channels: bool
If True the one-channel image is copied over the other channels, creating a three-channel image.
num_workers: int
Number of workers to be used (a number too high may slow down loading).
shuffle: bool
If True shuffles the data in the DataLoader.
Some algorithms (Self-Ensemble & AdaMatch) require different DataLoaders that are not shuffled between one another!
"""
# create dataset
dataset = NumpyDataset(data_path, labels_path, augmentations, three_channels)
# create dataloader
dataloader = DataLoader(dataset, shuffle=shuffle, batch_size=bs, num_workers=num_workers)
return dataloader
| [
"torch.from_numpy",
"torch.tensor",
"torch.utils.data.DataLoader"
] | 1.9.1 | zysymu/Domain-Adaptation-DeepLense | 64d26010d730b4e049a07b58a7234a74de0d292c |
1.3 | import os
from collections import OrderedDict
import torch
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel
class BaseModel():
def __init__(self, opt):
self.opt = opt
self.device = torch.device('cuda' if opt['gpu_ids'] is not None else 'cpu')
self.is_train = opt['is_train']
self.schedulers = []
self.optimizers = []
def feed_data(self, data):
pass
def optimize_parameters(self):
pass
def get_current_visuals(self):
pass
def get_current_losses(self):
pass
def print_network(self):
pass
def save(self, label):
pass
def load(self):
pass
def _set_lr(self, lr_groups_l):
'''Set the learning rate for warm-up.
lr_groups_l: list of lr_groups, one for each optimizer.'''
for optimizer, lr_groups in zip(self.optimizers, lr_groups_l):
for param_group, lr in zip(optimizer.param_groups, lr_groups):
param_group['lr'] = lr
def _get_init_lr(self):
# get the initial lr, which is set by the scheduler
init_lr_groups_l = []
for optimizer in self.optimizers:
init_lr_groups_l.append([v['initial_lr'] for v in optimizer.param_groups])
return init_lr_groups_l
def update_learning_rate(self, cur_iter, warmup_iter=-1):
for scheduler in self.schedulers:
scheduler.step()
#### set up warm up learning rate
if cur_iter < warmup_iter:
# get initial lr for each group
init_lr_g_l = self._get_init_lr()
# modify warming-up learning rates
warm_up_lr_l = []
for init_lr_g in init_lr_g_l:
warm_up_lr_l.append([v / warmup_iter * cur_iter for v in init_lr_g])
# set learning rate
self._set_lr(warm_up_lr_l)
def get_current_learning_rate(self):
# return self.schedulers[0].get_lr()[0]
return self.optimizers[0].param_groups[0]['lr']
def get_network_description(self, network):
'''Get the string and total parameters of the network'''
if isinstance(network, nn.DataParallel) or isinstance(network, DistributedDataParallel):
network = network.module
s = str(network)
n = sum(map(lambda x: x.numel(), network.parameters()))
return s, n
def save_network(self, network, network_label, iter_label):
save_filename = '{}_{}.pth'.format(iter_label, network_label)
save_path = os.path.join(self.opt['path']['models'], save_filename)
if isinstance(network, nn.DataParallel) or isinstance(network, DistributedDataParallel):
network = network.module
state_dict = network.state_dict()
for key, param in state_dict.items():
state_dict[key] = param.cpu()
torch.save(state_dict, save_path)
def load_network(self, load_path, network, strict=True):
if isinstance(network, nn.DataParallel) or isinstance(network, DistributedDataParallel):
network = network.module
load_net = torch.load(load_path)
load_net_clean = OrderedDict() # remove unnecessary 'module.'
for k, v in load_net.items():
if k.startswith('module.'):
load_net_clean[k[7:]] = v
else:
load_net_clean[k] = v
network.load_state_dict(load_net_clean, strict=strict)
def save_training_state(self, epoch, iter_step):
'''Saves training state during training, which will be used for resuming'''
state = {'epoch': epoch, 'iter': iter_step, 'schedulers': [], 'optimizers': []}
for s in self.schedulers:
state['schedulers'].append(s.state_dict())
for o in self.optimizers:
state['optimizers'].append(o.state_dict())
save_filename = '{}.state'.format(iter_step)
save_path = os.path.join(self.opt['path']['training_state'], save_filename)
torch.save(state, save_path)
def resume_training(self, resume_state):
'''Resume the optimizers and schedulers for training'''
resume_optimizers = resume_state['optimizers']
resume_schedulers = resume_state['schedulers']
assert len(resume_optimizers) == len(self.optimizers), 'Wrong lengths of optimizers'
assert len(resume_schedulers) == len(self.schedulers), 'Wrong lengths of schedulers'
for i, o in enumerate(resume_optimizers):
self.optimizers[i].load_state_dict(o)
for i, s in enumerate(resume_schedulers):
self.schedulers[i].load_state_dict(s)
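# Editor's note: a minimal hedged sketch (not part of the original file) of how a subclass
# can plug into this base class; the option dict keys follow the usage above, and the
# names ToyModel / netG are illustrative.
if __name__ == "__main__":
    import torch.optim as optim
    class ToyModel(BaseModel):
        def __init__(self, opt):
            super().__init__(opt)
            self.netG = nn.Linear(4, 4).to(self.device)
            self.optimizers.append(optim.Adam(self.netG.parameters(), lr=1e-4))
    toy_opt = {'gpu_ids': None, 'is_train': True,
               'path': {'models': '.', 'training_state': '.'}}
    toy = ToyModel(toy_opt)
    print(toy.get_network_description(toy.netG)[1], "parameters")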
| [
"torch.device",
"torch.save",
"torch.load"
] | 1.3.1 | fanld/HDRTVNet | 7ef7f25f55b776b4ca9fa2d7936895a42b0960af |
1.7 | import torch
import torch.nn as nn
class CLSA(nn.Module):
def __init__(self, base_encoder, args, dim=128, K=65536, m=0.999, T=0.2, mlp=True):
"""
:param base_encoder: encoder model
:param args: config parameters
:param dim: feature dimension (default: 128)
:param K: queue size; number of negative keys (default: 65536)
:param m: momentum of updating key encoder (default: 0.999)
:param T: softmax temperature (default: 0.2)
:param mlp: use MLP layer to process encoder output or not (default: True)
"""
super(CLSA, self).__init__()
self.args = args
self.K = K
self.m = m
self.T = T
self.T2 = self.args.clsa_t
# create the encoders
# num_classes is the output fc dimension
self.encoder_q = base_encoder(num_classes=dim)
self.encoder_k = base_encoder(num_classes=dim)
if mlp: # hack: brute-force replacement
dim_mlp = self.encoder_q.fc.weight.shape[1]
self.encoder_q.fc = nn.Sequential(nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.encoder_q.fc)
self.encoder_k.fc = nn.Sequential(nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.encoder_k.fc)
for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
param_k.data.copy_(param_q.data) # initialize
param_k.requires_grad = False # not update by gradient
self.register_buffer("queue", torch.randn(dim, K))
self.queue = nn.functional.normalize(self.queue, dim=0) # normalize across queue instead of each example
self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))
# config parameters for CLSA stronger augmentation and multi-crop
self.weak_pick = args.pick_weak
self.strong_pick = args.pick_strong
self.weak_pick = set(self.weak_pick)
self.strong_pick = set(self.strong_pick)
self.gpu = args.gpu
self.sym = self.args.sym
@torch.no_grad()
def _momentum_update_key_encoder(self):
"""
Momentum update of the key encoder
"""
for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
param_k.data = param_k.data * self.m + param_q.data * (1. - self.m)
@torch.no_grad()
def _dequeue_and_enqueue(self, queue, queue_ptr, keys):
# gather keys before updating queue
#keys = concat_all_gather(keys) #already concatenated before
batch_size = keys.shape[0]
ptr = int(queue_ptr)
assert self.K % batch_size == 0 # for simplicity
# replace the keys at ptr (dequeue and enqueue)
queue[:, ptr:ptr + batch_size] = keys.T
ptr = (ptr + batch_size) % self.K # move pointer
queue_ptr[0] = ptr
@torch.no_grad()
def _batch_shuffle_ddp(self, x):
"""
Batch shuffle, for making use of BatchNorm.
*** Only support DistributedDataParallel (DDP) model. ***
"""
# gather from all gpus
batch_size_this = x.shape[0]
x_gather = concat_all_gather(x)
batch_size_all = x_gather.shape[0]
num_gpus = batch_size_all // batch_size_this
# random shuffle index
idx_shuffle = torch.randperm(batch_size_all).cuda()
# broadcast to all gpus
torch.distributed.broadcast(idx_shuffle, src=0)
# index for restoring
idx_unshuffle = torch.argsort(idx_shuffle)
# shuffled index for this gpu
gpu_idx = torch.distributed.get_rank()
idx_this = idx_shuffle.view(num_gpus, -1)[gpu_idx]
return x_gather[idx_this], idx_unshuffle
@torch.no_grad()
def _batch_unshuffle_ddp(self, x, idx_unshuffle):
"""
Undo batch shuffle.
*** Only support DistributedDataParallel (DDP) model. ***
"""
# gather from all gpus
batch_size_this = x.shape[0]
x_gather = concat_all_gather(x)
batch_size_all = x_gather.shape[0]
num_gpus = batch_size_all // batch_size_this
# restored index for this gpu
gpu_idx = torch.distributed.get_rank()
idx_this = idx_unshuffle.view(num_gpus, -1)[gpu_idx]
return x_gather[idx_this]
def forward(self, im_q_list, im_k,im_strong_list):
"""
:param im_q_list: query image list
:param im_k: key image
:param im_strong_list: query strong image list
:return:
weak: logit_list, label_list
strong: logit_list, label_list
"""
if self.sym:
q_list = []
for k, im_q in enumerate(im_q_list): # weak forward
if k not in self.weak_pick:
continue
# can't shuffle here because shuffling would stop gradients; it can only be applied to k
# im_q, idx_unshuffle = self._batch_shuffle_ddp(im_q)
q = self.encoder_q(im_q) # queries: NxC
q = nn.functional.normalize(q, dim=1)
# q = self._batch_unshuffle_ddp(q, idx_unshuffle)
q_list.append(q)
# add the encoding of im_k as one of weakly supervised
q = self.encoder_q(im_k)
q = nn.functional.normalize(q, dim=1)
q_list.append(q)
q_strong_list = []
for k, im_strong in enumerate(im_strong_list):
# im_strong, idx_unshuffle = self._batch_shuffle_ddp(im_strong)
if k not in self.strong_pick:
continue
q_strong = self.encoder_q(im_strong) # queries: NxC
q_strong = nn.functional.normalize(q_strong, dim=1)
# q_strong = self._batch_unshuffle_ddp(q_strong, idx_unshuffle)
q_strong_list.append(q_strong)
with torch.no_grad(): # no gradient to keys
# if update_key_encoder:
self._momentum_update_key_encoder() # update the key encoder
# shuffle for making use of BN
im_k, idx_unshuffle = self._batch_shuffle_ddp(im_k)
k = self.encoder_k(im_k) # keys: NxC
k = nn.functional.normalize(k, dim=1)
# undo shuffle
k = self._batch_unshuffle_ddp(k, idx_unshuffle)
k = k.detach()
k = concat_all_gather(k)
k2 = self.encoder_k(im_q_list[0]) # keys: NxC
k2 = nn.functional.normalize(k2, dim=1)
# undo shuffle
k2 = self._batch_unshuffle_ddp(k2, idx_unshuffle)
k2 = k2.detach()
k2 = concat_all_gather(k2)
logits0_list = []
labels0_list = []
logits1_list = []
labels1_list = []
# first iter the 1st k supervised
for choose_idx in range(len(q_list) - 1):
q = q_list[choose_idx]
# positive logits: NxN
l_pos = torch.einsum('nc,ck->nk', [q, k.T])
# negative logits: NxK
l_neg = torch.einsum('nc,ck->nk', [q, self.queue.clone().detach()])
# logits: Nx(1+K)
logits = torch.cat([l_pos, l_neg], dim=1)
# apply temperature
logits /= self.T
# labels: positive key indicators
cur_batch_size = logits.shape[0]
cur_gpu = self.gpu
choose_match = cur_gpu * cur_batch_size
labels = torch.arange(choose_match, choose_match + cur_batch_size, dtype=torch.long).cuda()
logits0_list.append(logits)
labels0_list.append(labels)
labels0 = logits.clone().detach() # use previous q as supervision
labels0 = labels0 * self.T / self.T2
labels0 = torch.softmax(labels0, dim=1)
labels0 = labels0.detach()
for choose_idx2 in range(len(q_strong_list)):
q_strong = q_strong_list[choose_idx2]
# weak strong loss
l_pos = torch.einsum('nc,ck->nk', [q_strong, k.T])
# negative logits: NxK
l_neg = torch.einsum('nc,ck->nk', [q_strong, self.queue.clone().detach()])
# logits: Nx(1+K)
logits0 = torch.cat([l_pos, l_neg], dim=1) # N*(K+1)
# apply temperature
logits0 /= self.T2
logits0 = torch.softmax(logits0, dim=1)
logits1_list.append(logits0)
labels1_list.append(labels0)
# iter another part, symmetrized
k = k2
for choose_idx in range(1, len(q_list)):
q = q_list[choose_idx]
# positive logits: NxN
l_pos = torch.einsum('nc,ck->nk', [q, k.T])
# negative logits: NxK
l_neg = torch.einsum('nc,ck->nk', [q, self.queue.clone().detach()])
# logits: Nx(1+K)
logits = torch.cat([l_pos, l_neg], dim=1)
# apply temperature
logits /= self.T
# labels: positive key indicators
cur_batch_size = logits.shape[0]
cur_gpu = self.gpu
choose_match = cur_gpu * cur_batch_size
labels = torch.arange(choose_match, choose_match + cur_batch_size, dtype=torch.long).cuda()
logits0_list.append(logits)
labels0_list.append(labels)
labels0 = logits.clone().detach() # use previous q as supervision
labels0 = labels0 * self.T / self.T2
labels0 = torch.softmax(labels0, dim=1)
labels0 = labels0.detach()
for choose_idx2 in range(len(q_strong_list)):
q_strong = q_strong_list[choose_idx2]
# weak strong loss
l_pos = torch.einsum('nc,ck->nk', [q_strong, k.T])
# negative logits: NxK
l_neg = torch.einsum('nc,ck->nk', [q_strong, self.queue.clone().detach()])
# logits: Nx(1+K)
logits0 = torch.cat([l_pos, l_neg], dim=1) # N*(K+1)
# apply temperature
logits0 /= self.T2
logits0 = torch.softmax(logits0, dim=1)
logits1_list.append(logits0)
labels1_list.append(labels0)
# dequeue and enqueue
# if update_key_encoder==False:
self._dequeue_and_enqueue(self.queue, self.queue_ptr, k)
return logits0_list, labels0_list, logits1_list, labels1_list
else:
q_list = []
for k, im_q in enumerate(im_q_list): # weak forward
if k not in self.weak_pick:
continue
# can't shuffle here because shuffling would stop gradients; it can only be applied to k
# im_q, idx_unshuffle = self._batch_shuffle_ddp(im_q)
q = self.encoder_q(im_q) # queries: NxC
q = nn.functional.normalize(q, dim=1)
# q = self._batch_unshuffle_ddp(q, idx_unshuffle)
q_list.append(q)
q_strong_list = []
for k, im_strong in enumerate(im_strong_list):
# im_strong, idx_unshuffle = self._batch_shuffle_ddp(im_strong)
if k not in self.strong_pick:
continue
q_strong = self.encoder_q(im_strong) # queries: NxC
q_strong = nn.functional.normalize(q_strong, dim=1)
# q_strong = self._batch_unshuffle_ddp(q_strong, idx_unshuffle)
q_strong_list.append(q_strong)
# compute key features
with torch.no_grad(): # no gradient to keys
# if update_key_encoder:
self._momentum_update_key_encoder() # update the key encoder
# shuffle for making use of BN
im_k, idx_unshuffle = self._batch_shuffle_ddp(im_k)
k = self.encoder_k(im_k) # keys: NxC
k = nn.functional.normalize(k, dim=1)
# undo shuffle
k = self._batch_unshuffle_ddp(k, idx_unshuffle)
k = k.detach()
k = concat_all_gather(k)
# compute logits
# Einstein sum is more intuitive
logits0_list = []
labels0_list = []
logits1_list = []
labels1_list = []
for choose_idx in range(len(q_list)):
q = q_list[choose_idx]
# positive logits: NxN
l_pos = torch.einsum('nc,ck->nk', [q, k.T])
# negative logits: NxK
l_neg = torch.einsum('nc,ck->nk', [q, self.queue.clone().detach()])
# logits: Nx(1+K)
logits = torch.cat([l_pos, l_neg], dim=1)
# apply temperature
logits /= self.T
# labels: positive key indicators
cur_batch_size = logits.shape[0]
cur_gpu = self.gpu
choose_match = cur_gpu * cur_batch_size
labels = torch.arange(choose_match, choose_match + cur_batch_size, dtype=torch.long).cuda()
logits0_list.append(logits)
labels0_list.append(labels)
labels0 = logits.clone().detach() # use previous q as supervision
labels0 = labels0*self.T/self.T2
labels0 = torch.softmax(labels0, dim=1)
labels0 = labels0.detach()
for choose_idx2 in range(len(q_strong_list)):
q_strong = q_strong_list[choose_idx2]
# weak strong loss
l_pos = torch.einsum('nc,ck->nk', [q_strong, k.T])
# negative logits: NxK
l_neg = torch.einsum('nc,ck->nk', [q_strong, self.queue.clone().detach()])
# logits: Nx(1+K)
logits0 = torch.cat([l_pos, l_neg], dim=1) # N*(K+1)
# apply temperature
logits0 /= self.T2
logits0 = torch.softmax(logits0, dim=1)
logits1_list.append(logits0)
labels1_list.append(labels0)
# dequeue and enqueue
# if update_key_encoder==False:
self._dequeue_and_enqueue(self.queue, self.queue_ptr, k)
return logits0_list, labels0_list, logits1_list, labels1_list
@torch.no_grad()
def concat_all_gather(tensor):
"""
Performs all_gather operation on the provided tensors.
*** Warning ***: torch.distributed.all_gather has no gradient.
"""
tensors_gather = [torch.ones_like(tensor)
for _ in range(torch.distributed.get_world_size())]
torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
output = torch.cat(tensors_gather, dim=0)
return output
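# Editor's note: a hedged construction sketch (not part of the original file); the
# hyper-parameters below are illustrative, and forward() additionally requires
# torch.distributed to be initialised, so only instantiation is shown here.
if __name__ == "__main__":
    from types import SimpleNamespace
    from torchvision.models import resnet50
    demo_args = SimpleNamespace(clsa_t=5.0, pick_weak=[0], pick_strong=[0], gpu=0, sym=False)
    model = CLSA(resnet50, demo_args, dim=128, K=65536, m=0.999, T=0.2, mlp=True)
    print(sum(p.numel() for p in model.parameters()), "parameters")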
| [
"torch.zeros",
"torch.nn.functional.normalize",
"torch.cat",
"torch.nn.Linear",
"torch.distributed.get_world_size",
"torch.einsum",
"torch.arange",
"torch.no_grad",
"torch.argsort",
"torch.distributed.all_gather",
"torch.randperm",
"torch.softmax",
"torch.nn.ReLU",
"torch.ones_like",
"torch.distributed.get_rank",
"torch.randn",
"torch.distributed.broadcast"
] | 1.7.1 | maple-research-lab/CLSA | 37df76cf5cb032683e57b70a3a4090f0d524c8fd |
1.7 | import torch.nn as nn
import config as c
# custom weights initialization called on netG and netD
# based on Neff et al. 2017 parameters
def weights_init(model):
classname = model.__class__.__name__
if classname.find('Conv') != -1:
mean = 0.0
std = 0.05
nn.init.normal_(model.weight.data, mean, std)
elif classname.find('BatchNorm') != -1:
mean = 0.0
std = 0.05
nn.init.normal_(model.weight.data, mean, std)
nn.init.constant_(model.bias.data, 0)
# Generator block
class Generator(nn.Module):
def __init__(self, activation='relu'):
super(Generator, self).__init__()
self.init_size = c.image_size[0] // 8
self.init_z = c.image_size[-1] // 8
activations = nn.ModuleDict([['lrelu', nn.LeakyReLU(0.2,
inplace=True)],
['relu', nn.ReLU(inplace=True)]])
def upsample_conv_block(in_channels, out_channels,
activation=activation):
if not c.spectral_norm_G:
block = [nn.Upsample(scale_factor=2),
nn.Conv3d(in_channels, out_channels, c.kg, stride=1,
padding=(c.kg-1)//2),
nn.BatchNorm3d(out_channels)]
else:
block = [nn.Upsample(scale_factor=2),
nn.utils.spectral_norm(nn.Conv3d(in_channels,
out_channels,
c.kg, stride=1,
padding=(c.kg-1)//2))
]
block.append(activations[activation])
return block
self.linear1 = nn.Sequential(nn.Linear(c.nz, c.ngf *
(self.init_size ** 2) *
self.init_z))
self.batch1 = nn.Sequential(nn.BatchNorm3d(c.ngf),
activations[activation])
self.layer2 = nn.Sequential(*upsample_conv_block(c.ngf, c.ngf//2))
self.layer3 = nn.Sequential(*upsample_conv_block(c.ngf//2, c.ngf//4))
self.layer4 = nn.Sequential(*upsample_conv_block(c.ngf//4, c.ngf//8))
self.layer5 = nn.Conv3d(c.ngf // 8, c.nc, c.kg, stride=1,
padding=(c.kg-1)//2)
self.activationG = nn.Tanh()
def forward(self, inp):
# print(inp.size())
x = self.linear1(inp.view(inp.size()[0], -1))
x = x.view(x.size()[0], c.ngf, self.init_size, self.init_size,
self.init_z)
x = self.batch1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.layer5(x)
gen_image = self.activationG(x)
return gen_image
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
def conv_block(in_channels, out_channels, activation='LeakyReLU'):
if not c.spectral_norm_D:
block = [nn.Conv3d(in_channels, out_channels, c.kd, stride=2,
padding=(c.kd-1)//2),
nn.InstanceNorm3d(out_channels)]
else:
block = [nn.utils.spectral_norm(nn.Conv3d(in_channels,
out_channels,
c.kd, stride=2,
padding=(c.kd-1)//2))
]
if activation == 'LeakyReLU':
block.append(nn.LeakyReLU(0.2, inplace=True))
else:
block.append(nn.ReLU(inplace=True))
return block
self.layer1 = nn.Sequential(*conv_block(c.nc, c.ndf//8))
self.layer2 = nn.Sequential(*conv_block(c.ndf//8, c.ndf//4))
self.layer3 = nn.Sequential(*conv_block(c.ndf//4, c.ndf//2))
self.layer4 = nn.Sequential(*conv_block(c.ndf//2, c.ndf))
self.layer5 = nn.Linear(c.ndf * 8 * 8 * 4, 1)
def forward(self, inp):
x = self.layer1(inp)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
disc_out = self.layer5(x.view(x.size()[0], -1))
return disc_out
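# Editor's note: a hedged smoke-test sketch (not part of the original file); it assumes the
# project's config module defines image_size, nz, ngf, ndf, nc, kg, kd and the spectral-norm
# flags used above, so the reported sizes depend entirely on that configuration.
if __name__ == "__main__":
    netG = Generator(activation='relu')
    netD = Discriminator()
    netG.apply(weights_init)
    netD.apply(weights_init)
    print(netG.init_size, netG.init_z)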
| [
"torch.nn.Linear",
"torch.nn.init.constant_",
"torch.nn.Tanh",
"torch.nn.LeakyReLU",
"torch.nn.ReLU",
"torch.nn.Upsample",
"torch.nn.init.normal_",
"torch.nn.Conv3d",
"torch.nn.InstanceNorm3d",
"torch.nn.BatchNorm3d"
] | 1.7.0 | pooja-subramaniam/3DGAN_synthesis_of_3D_TOF_MRA_with_segmentation_labels | c1142594b3fa0afce46436350b3f80efbb82d7b5 |
1.6 | import math
from typing import Any
import numpy as np
import torch
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader, Dataset
from pl_bolts.utils import _SKLEARN_AVAILABLE
from pl_bolts.utils.warnings import warn_missing_pkg
if _SKLEARN_AVAILABLE:
from sklearn.utils import shuffle as sk_shuffle
else: # pragma: no cover
warn_missing_pkg("sklearn")
class SklearnDataset(Dataset):
"""
Mapping from numpy (or sklearn) datasets to PyTorch datasets.
Example:
>>> from sklearn.datasets import load_boston
>>> from pl_bolts.datamodules import SklearnDataset
...
>>> X, y = load_boston(return_X_y=True)
>>> dataset = SklearnDataset(X, y)
>>> len(dataset)
506
"""
def __init__(self, X: np.ndarray, y: np.ndarray, X_transform: Any = None, y_transform: Any = None):
"""
Args:
X: Numpy ndarray
y: Numpy ndarray
X_transform: Any transform that works with Numpy arrays
y_transform: Any transform that works with Numpy arrays
"""
super().__init__()
self.X = X
self.Y = y
self.X_transform = X_transform
self.y_transform = y_transform
def __len__(self):
return len(self.X)
def __getitem__(self, idx):
x = self.X[idx].astype(np.float32)
y = self.Y[idx]
# Do not convert integer to float for classification data
if not ((y.dtype == np.int32) or (y.dtype == np.int64)):
y = y.astype(np.float32)
if self.X_transform:
x = self.X_transform(x)
if self.y_transform:
y = self.y_transform(y)
return x, y
class TensorDataset(Dataset):
"""
Prepare PyTorch tensor dataset for data loaders.
Example:
>>> from pl_bolts.datamodules import TensorDataset
...
>>> X = torch.rand(10, 3)
>>> y = torch.rand(10)
>>> dataset = TensorDataset(X, y)
>>> len(dataset)
10
"""
def __init__(self, X: torch.Tensor, y: torch.Tensor, X_transform: Any = None, y_transform: Any = None):
"""
Args:
X: PyTorch tensor
y: PyTorch tensor
X_transform: Any transform that works with PyTorch tensors
y_transform: Any transform that works with PyTorch tensors
"""
super().__init__()
self.X = X
self.Y = y
self.X_transform = X_transform
self.y_transform = y_transform
def __len__(self):
return len(self.X)
def __getitem__(self, idx):
x = self.X[idx].float()
y = self.Y[idx]
if self.X_transform:
x = self.X_transform(x)
if self.y_transform:
y = self.y_transform(y)
return x, y
class SklearnDataModule(LightningDataModule):
"""
Automatically generates the train, validation and test splits for a Numpy dataset. They are set up as
dataloaders for convenience. Optionally, you can pass in your own validation and test splits.
Example:
>>> from sklearn.datasets import load_boston
>>> from pl_bolts.datamodules import SklearnDataModule
...
>>> X, y = load_boston(return_X_y=True)
>>> loaders = SklearnDataModule(X, y, batch_size=32)
...
>>> # train set
>>> train_loader = loaders.train_dataloader()
>>> len(train_loader.dataset)
355
>>> len(train_loader)
12
>>> # validation set
>>> val_loader = loaders.val_dataloader()
>>> len(val_loader.dataset)
100
>>> len(val_loader)
4
>>> # test set
>>> test_loader = loaders.test_dataloader()
>>> len(test_loader.dataset)
51
>>> len(test_loader)
2
"""
name = 'sklearn'
def __init__(
self,
X,
y,
x_val=None,
y_val=None,
x_test=None,
y_test=None,
val_split=0.2,
test_split=0.1,
num_workers=2,
random_state=1234,
shuffle=True,
batch_size: int = 16,
pin_memory=False,
drop_last=False,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.num_workers = num_workers
self.batch_size = batch_size
self.shuffle = shuffle
self.pin_memory = pin_memory
self.drop_last = drop_last
# shuffle x and y
if shuffle and _SKLEARN_AVAILABLE:
X, y = sk_shuffle(X, y, random_state=random_state)
elif shuffle and not _SKLEARN_AVAILABLE: # pragma: no cover
raise ModuleNotFoundError(
'You want to use shuffle function from `scikit-learn` which is not installed yet.'
)
val_split = 0 if x_val is not None or y_val is not None else val_split
test_split = 0 if x_test is not None or y_test is not None else test_split
hold_out_split = val_split + test_split
if hold_out_split > 0:
val_split = val_split / hold_out_split
hold_out_size = math.floor(len(X) * hold_out_split)
x_holdout, y_holdout = X[:hold_out_size], y[:hold_out_size]
test_i_start = int(val_split * hold_out_size)
x_val_hold_out, y_val_holdout = x_holdout[:test_i_start], y_holdout[:test_i_start]
x_test_hold_out, y_test_holdout = x_holdout[test_i_start:], y_holdout[test_i_start:]
X, y = X[hold_out_size:], y[hold_out_size:]
# if don't have x_val and y_val create split from X
if x_val is None and y_val is None and val_split > 0:
x_val, y_val = x_val_hold_out, y_val_holdout
# if don't have x_test, y_test create split from X
if x_test is None and y_test is None and test_split > 0:
x_test, y_test = x_test_hold_out, y_test_holdout
self._init_datasets(X, y, x_val, y_val, x_test, y_test)
def _init_datasets(self, X, y, x_val, y_val, x_test, y_test):
self.train_dataset = SklearnDataset(X, y)
self.val_dataset = SklearnDataset(x_val, y_val)
self.test_dataset = SklearnDataset(x_test, y_test)
def train_dataloader(self):
loader = DataLoader(
self.train_dataset,
batch_size=self.batch_size,
shuffle=self.shuffle,
num_workers=self.num_workers,
drop_last=self.drop_last,
pin_memory=self.pin_memory
)
return loader
def val_dataloader(self):
loader = DataLoader(
self.val_dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=self.num_workers,
drop_last=self.drop_last,
pin_memory=self.pin_memory
)
return loader
def test_dataloader(self):
loader = DataLoader(
self.test_dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=self.num_workers,
drop_last=self.drop_last,
pin_memory=self.pin_memory
)
return loader
# TODO: this seems to be wrong, something missing here, another inherit class?
# class TensorDataModule(SklearnDataModule):
# """
# Automatically generates the train, validation and test splits for a PyTorch tensor dataset. They are set up as
# dataloaders for convenience. Optionally, you can pass in your own validation and test splits.
#
# Example:
#
# >>> from pl_bolts.datamodules import TensorDataModule
# >>> import torch
# ...
# >>> # create dataset
# >>> X = torch.rand(100, 3)
# >>> y = torch.rand(100)
# >>> loaders = TensorDataModule(X, y)
# ...
# >>> # train set
# >>> train_loader = loaders.train_dataloader(batch_size=10)
# >>> len(train_loader.dataset)
# 70
# >>> len(train_loader)
# 7
# >>> # validation set
# >>> val_loader = loaders.val_dataloader(batch_size=10)
# >>> len(val_loader.dataset)
# 20
# >>> len(val_loader)
# 2
# >>> # test set
# >>> test_loader = loaders.test_dataloader(batch_size=10)
# >>> len(test_loader.dataset)
# 10
# >>> len(test_loader)
# 1
# """
| [
"torch.utils.data.DataLoader"
] | 1.6 | oke-aditya/pytorch-lightning-bolts | 268df20bb442e7385b709b1488d37fd2767aba3c |
1.1 | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""HiFi-GAN Modules.
This code is modified from https://github.com/kan-bayashi/ParallelWaveGAN.
"""
import copy
import logging
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
import numpy as np
import torch
import torch.nn.functional as F
from espnet2.gan_tts.hifigan.residual_block import ResidualBlock
class HiFiGANGenerator(torch.nn.Module):
"""HiFiGAN generator module."""
def __init__(
self,
in_channels: int = 80,
out_channels: int = 1,
channels: int = 512,
global_channels: int = -1,
kernel_size: int = 7,
upsample_scales: List[int] = [8, 8, 2, 2],
upsample_kernel_sizes: List[int] = [16, 16, 4, 4],
resblock_kernel_sizes: List[int] = [3, 7, 11],
resblock_dilations: List[List[int]] = [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
use_additional_convs: bool = True,
bias: bool = True,
nonlinear_activation: str = "LeakyReLU",
nonlinear_activation_params: Dict[str, Any] = {"negative_slope": 0.1},
use_weight_norm: bool = True,
):
"""Initialize HiFiGANGenerator module.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
channels (int): Number of hidden representation channels.
global_channels (int): Number of global conditioning channels.
kernel_size (int): Kernel size of initial and final conv layer.
upsample_scales (List[int]): List of upsampling scales.
upsample_kernel_sizes (List[int]): List of kernel sizes for upsample layers.
resblock_kernel_sizes (List[int]): List of kernel sizes for residual blocks.
resblock_dilations (List[List[int]]): List of list of dilations for residual
blocks.
use_additional_convs (bool): Whether to use additional conv layers in
residual blocks.
bias (bool): Whether to add bias parameter in convolution layers.
nonlinear_activation (str): Activation function module name.
nonlinear_activation_params (Dict[str, Any]): Hyperparameters for activation
function.
use_weight_norm (bool): Whether to use weight norm. If set to true, it will
be applied to all of the conv layers.
"""
super().__init__()
# check hyperparameters are valid
assert kernel_size % 2 == 1, "Kernel size must be odd number."
assert len(upsample_scales) == len(upsample_kernel_sizes)
assert len(resblock_dilations) == len(resblock_kernel_sizes)
# define modules
self.upsample_factor = int(np.prod(upsample_scales))
self.num_upsamples = len(upsample_kernel_sizes)
self.num_blocks = len(resblock_kernel_sizes)
self.input_conv = torch.nn.Conv1d(
in_channels,
channels,
kernel_size,
1,
padding=(kernel_size - 1) // 2,
)
self.upsamples = torch.nn.ModuleList()
self.blocks = torch.nn.ModuleList()
for i in range(len(upsample_kernel_sizes)):
assert upsample_kernel_sizes[i] == 2 * upsample_scales[i]
self.upsamples += [
torch.nn.Sequential(
getattr(torch.nn, nonlinear_activation)(
**nonlinear_activation_params
),
torch.nn.ConvTranspose1d(
channels // (2 ** i),
channels // (2 ** (i + 1)),
upsample_kernel_sizes[i],
upsample_scales[i],
padding=upsample_scales[i] // 2 + upsample_scales[i] % 2,
output_padding=upsample_scales[i] % 2,
),
)
]
for j in range(len(resblock_kernel_sizes)):
self.blocks += [
ResidualBlock(
kernel_size=resblock_kernel_sizes[j],
channels=channels // (2 ** (i + 1)),
dilations=resblock_dilations[j],
bias=bias,
use_additional_convs=use_additional_convs,
nonlinear_activation=nonlinear_activation,
nonlinear_activation_params=nonlinear_activation_params,
)
]
self.output_conv = torch.nn.Sequential(
# NOTE(kan-bayashi): follow official implementation but why
# using different slope parameter here? (0.1 vs. 0.01)
torch.nn.LeakyReLU(),
torch.nn.Conv1d(
channels // (2 ** (i + 1)),
out_channels,
kernel_size,
1,
padding=(kernel_size - 1) // 2,
),
torch.nn.Tanh(),
)
if global_channels > 0:
self.global_conv = torch.nn.Conv1d(global_channels, channels, 1)
# apply weight norm
if use_weight_norm:
self.apply_weight_norm()
# reset parameters
self.reset_parameters()
def forward(
self, c: torch.Tensor, g: Optional[torch.Tensor] = None
) -> torch.Tensor:
"""Calculate forward propagation.
Args:
c (Tensor): Input tensor (B, in_channels, T).
g (Optional[Tensor]): Global conditioning tensor (B, global_channels, 1).
Returns:
Tensor: Output tensor (B, out_channels, T).
"""
c = self.input_conv(c)
if g is not None:
c = c + self.global_conv(g)
for i in range(self.num_upsamples):
c = self.upsamples[i](c)
cs = 0.0 # initialize
for j in range(self.num_blocks):
cs += self.blocks[i * self.num_blocks + j](c)
c = cs / self.num_blocks
c = self.output_conv(c)
return c
def reset_parameters(self):
"""Reset parameters.
This initialization follows the official implementation manner.
https://github.com/jik876/hifi-gan/blob/master/models.py
"""
def _reset_parameters(m: torch.nn.Module):
if isinstance(m, (torch.nn.Conv1d, torch.nn.ConvTranspose1d)):
m.weight.data.normal_(0.0, 0.01)
logging.debug(f"Reset parameters in {m}.")
self.apply(_reset_parameters)
def remove_weight_norm(self):
"""Remove weight normalization module from all of the layers."""
def _remove_weight_norm(m: torch.nn.Module):
try:
logging.debug(f"Weight norm is removed from {m}.")
torch.nn.utils.remove_weight_norm(m)
except ValueError: # this module didn't have weight norm
return
self.apply(_remove_weight_norm)
def apply_weight_norm(self):
"""Apply weight normalization module from all of the layers."""
def _apply_weight_norm(m: torch.nn.Module):
if isinstance(m, torch.nn.Conv1d) or isinstance(
m, torch.nn.ConvTranspose1d
):
torch.nn.utils.weight_norm(m)
logging.debug(f"Weight norm is applied to {m}.")
self.apply(_apply_weight_norm)
def inference(
self, c: torch.Tensor, g: Optional[torch.Tensor] = None
) -> torch.Tensor:
"""Perform inference.
Args:
c (torch.Tensor): Input tensor (T, in_channels).
g (Optional[Tensor]): Global conditioning tensor (global_channels, 1).
Returns:
Tensor: Output tensor (T ** upsample_factor, out_channels).
"""
if g is not None:
g = g.unsqueeze(0)
c = self.forward(c.transpose(1, 0).unsqueeze(0), g=g)
return c.squeeze(0).transpose(1, 0)
class HiFiGANPeriodDiscriminator(torch.nn.Module):
"""HiFiGAN period discriminator module."""
def __init__(
self,
in_channels: int = 1,
out_channels: int = 1,
period: int = 3,
kernel_sizes: List[int] = [5, 3],
channels: int = 32,
downsample_scales: List[int] = [3, 3, 3, 3, 1],
max_downsample_channels: int = 1024,
bias: bool = True,
nonlinear_activation: str = "LeakyReLU",
nonlinear_activation_params: Dict[str, Any] = {"negative_slope": 0.1},
use_weight_norm: bool = True,
use_spectral_norm: bool = False,
):
"""Initialize HiFiGANPeriodDiscriminator module.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
period (int): Period.
kernel_sizes (list): Kernel sizes of initial conv layers and the final conv
layer.
channels (int): Number of initial channels.
downsample_scales (List[int]): List of downsampling scales.
max_downsample_channels (int): Number of maximum downsampling channels.
use_additional_convs (bool): Whether to use additional conv layers in
residual blocks.
bias (bool): Whether to add bias parameter in convolution layers.
nonlinear_activation (str): Activation function module name.
nonlinear_activation_params (Dict[str, Any]): Hyperparameters for activation
function.
use_weight_norm (bool): Whether to use weight norm.
If set to true, it will be applied to all of the conv layers.
use_spectral_norm (bool): Whether to use spectral norm.
If set to true, it will be applied to all of the conv layers.
"""
super().__init__()
assert len(kernel_sizes) == 2
assert kernel_sizes[0] % 2 == 1, "Kernel size must be odd number."
assert kernel_sizes[1] % 2 == 1, "Kernel size must be odd number."
self.period = period
self.convs = torch.nn.ModuleList()
in_chs = in_channels
out_chs = channels
for downsample_scale in downsample_scales:
self.convs += [
torch.nn.Sequential(
torch.nn.Conv2d(
in_chs,
out_chs,
(kernel_sizes[0], 1),
(downsample_scale, 1),
padding=((kernel_sizes[0] - 1) // 2, 0),
),
getattr(torch.nn, nonlinear_activation)(
**nonlinear_activation_params
),
)
]
in_chs = out_chs
# NOTE(kan-bayashi): Use downsample_scale + 1?
out_chs = min(out_chs * 4, max_downsample_channels)
self.output_conv = torch.nn.Conv2d(
out_chs,
out_channels,
(kernel_sizes[1] - 1, 1),
1,
padding=((kernel_sizes[1] - 1) // 2, 0),
)
if use_weight_norm and use_spectral_norm:
raise ValueError("Either use use_weight_norm or use_spectral_norm.")
# apply weight norm
if use_weight_norm:
self.apply_weight_norm()
# apply spectral norm
if use_spectral_norm:
self.apply_spectral_norm()
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Calculate forward propagation.
Args:
x (Tensor): Input tensor (B, in_channels, T).
Returns:
list: List of each layer's tensors.
"""
# transform 1d to 2d -> (B, C, T/P, P)
b, c, t = x.shape
if t % self.period != 0:
n_pad = self.period - (t % self.period)
x = F.pad(x, (0, n_pad), "reflect")
t += n_pad
x = x.view(b, c, t // self.period, self.period)
# forward conv
outs = []
for layer in self.convs:
x = layer(x)
outs += [x]
x = self.output_conv(x)
x = torch.flatten(x, 1, -1)
outs += [x]
return outs
def apply_weight_norm(self):
"""Apply weight normalization module from all of the layers."""
def _apply_weight_norm(m: torch.nn.Module):
if isinstance(m, torch.nn.Conv2d):
torch.nn.utils.weight_norm(m)
logging.debug(f"Weight norm is applied to {m}.")
self.apply(_apply_weight_norm)
def apply_spectral_norm(self):
"""Apply spectral normalization module from all of the layers."""
def _apply_spectral_norm(m: torch.nn.Module):
if isinstance(m, torch.nn.Conv2d):
torch.nn.utils.spectral_norm(m)
logging.debug(f"Spectral norm is applied to {m}.")
self.apply(_apply_spectral_norm)
class HiFiGANMultiPeriodDiscriminator(torch.nn.Module):
"""HiFiGAN multi-period discriminator module."""
def __init__(
self,
periods: List[int] = [2, 3, 5, 7, 11],
discriminator_params: Dict[str, Any] = {
"in_channels": 1,
"out_channels": 1,
"kernel_sizes": [5, 3],
"channels": 32,
"downsample_scales": [3, 3, 3, 3, 1],
"max_downsample_channels": 1024,
"bias": True,
"nonlinear_activation": "LeakyReLU",
"nonlinear_activation_params": {"negative_slope": 0.1},
"use_weight_norm": True,
"use_spectral_norm": False,
},
):
"""Initialize HiFiGANMultiPeriodDiscriminator module.
Args:
periods (List[int]): List of periods.
discriminator_params (Dict[str, Any]): Parameters for hifi-gan period
discriminator module. The period parameter will be overwritten.
"""
super().__init__()
self.discriminators = torch.nn.ModuleList()
for period in periods:
params = copy.deepcopy(discriminator_params)
params["period"] = period
self.discriminators += [HiFiGANPeriodDiscriminator(**params)]
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Calculate forward propagation.
Args:
x (Tensor): Input noise signal (B, 1, T).
Returns:
List: List of list of each discriminator outputs, which consists of each
layer output tensors.
"""
outs = []
for f in self.discriminators:
outs += [f(x)]
return outs
class HiFiGANScaleDiscriminator(torch.nn.Module):
"""HiFi-GAN scale discriminator module."""
def __init__(
self,
in_channels: int = 1,
out_channels: int = 1,
kernel_sizes: List[int] = [15, 41, 5, 3],
channels: int = 128,
max_downsample_channels: int = 1024,
max_groups: int = 16,
bias: int = True,
downsample_scales: List[int] = [2, 2, 4, 4, 1],
nonlinear_activation: str = "LeakyReLU",
nonlinear_activation_params: Dict[str, Any] = {"negative_slope": 0.1},
use_weight_norm: bool = True,
use_spectral_norm: bool = False,
):
"""Initialize HiFiGAN scale discriminator module.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
kernel_sizes (List[int]): List of four kernel sizes. The first will be used
for the first conv layer, and the second is for downsampling part, and
the remaining two are for the last two output layers.
channels (int): Initial number of channels for conv layer.
max_downsample_channels (int): Maximum number of channels for downsampling
layers.
bias (bool): Whether to add bias parameter in convolution layers.
downsample_scales (List[int]): List of downsampling scales.
nonlinear_activation (str): Activation function module name.
nonlinear_activation_params (Dict[str, Any]): Hyperparameters for activation
function.
use_weight_norm (bool): Whether to use weight norm. If set to true, it will
be applied to all of the conv layers.
use_spectral_norm (bool): Whether to use spectral norm. If set to true, it
will be applied to all of the conv layers.
"""
super().__init__()
self.layers = torch.nn.ModuleList()
# check kernel size is valid
assert len(kernel_sizes) == 4
for ks in kernel_sizes:
assert ks % 2 == 1
# add first layer
self.layers += [
torch.nn.Sequential(
torch.nn.Conv1d(
in_channels,
channels,
# NOTE(kan-bayashi): Use always the same kernel size
kernel_sizes[0],
bias=bias,
padding=(kernel_sizes[0] - 1) // 2,
),
getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
)
]
# add downsample layers
in_chs = channels
out_chs = channels
# NOTE(kan-bayashi): Remove hard coding?
groups = 4
for downsample_scale in downsample_scales:
self.layers += [
torch.nn.Sequential(
torch.nn.Conv1d(
in_chs,
out_chs,
kernel_size=kernel_sizes[1],
stride=downsample_scale,
padding=(kernel_sizes[1] - 1) // 2,
groups=groups,
bias=bias,
),
getattr(torch.nn, nonlinear_activation)(
**nonlinear_activation_params
),
)
]
in_chs = out_chs
# NOTE(kan-bayashi): Remove hard coding?
out_chs = min(in_chs * 2, max_downsample_channels)
# NOTE(kan-bayashi): Remove hard coding?
groups = min(groups * 4, max_groups)
# add final layers
out_chs = min(in_chs * 2, max_downsample_channels)
self.layers += [
torch.nn.Sequential(
torch.nn.Conv1d(
in_chs,
out_chs,
kernel_size=kernel_sizes[2],
stride=1,
padding=(kernel_sizes[2] - 1) // 2,
bias=bias,
),
getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
)
]
self.layers += [
torch.nn.Conv1d(
out_chs,
out_channels,
kernel_size=kernel_sizes[3],
stride=1,
padding=(kernel_sizes[3] - 1) // 2,
bias=bias,
),
]
if use_weight_norm and use_spectral_norm:
raise ValueError("Either use use_weight_norm or use_spectral_norm.")
# apply weight norm
if use_weight_norm:
self.apply_weight_norm()
# apply spectral norm
if use_spectral_norm:
self.apply_spectral_norm()
def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
"""Calculate forward propagation.
Args:
x (Tensor): Input noise signal (B, 1, T).
Returns:
List[Tensor]: List of output tensors of each layer.
"""
outs = []
for f in self.layers:
x = f(x)
outs += [x]
return outs
def apply_weight_norm(self):
"""Apply weight normalization module from all of the layers."""
def _apply_weight_norm(m: torch.nn.Module):
if isinstance(m, torch.nn.Conv2d):
torch.nn.utils.weight_norm(m)
logging.debug(f"Weight norm is applied to {m}.")
self.apply(_apply_weight_norm)
def apply_spectral_norm(self):
"""Apply spectral normalization module from all of the layers."""
def _apply_spectral_norm(m: torch.nn.Module):
if isinstance(m, torch.nn.Conv2d):
torch.nn.utils.spectral_norm(m)
logging.debug(f"Spectral norm is applied to {m}.")
self.apply(_apply_spectral_norm)
class HiFiGANMultiScaleDiscriminator(torch.nn.Module):
"""HiFi-GAN multi-scale discriminator module."""
def __init__(
self,
scales: int = 3,
downsample_pooling: str = "AvgPool1d",
# follow the official implementation setting
downsample_pooling_params: Dict[str, Any] = {
"kernel_size": 4,
"stride": 2,
"padding": 2,
},
discriminator_params: Dict[str, Any] = {
"in_channels": 1,
"out_channels": 1,
"kernel_sizes": [15, 41, 5, 3],
"channels": 128,
"max_downsample_channels": 1024,
"max_groups": 16,
"bias": True,
"downsample_scales": [2, 2, 4, 4, 1],
"nonlinear_activation": "LeakyReLU",
"nonlinear_activation_params": {"negative_slope": 0.1},
},
follow_official_norm: bool = False,
):
"""Initialize HiFiGAN multi-scale discriminator module.
Args:
scales (int): Number of multi-scales.
downsample_pooling (str): Pooling module name for downsampling of the
inputs.
downsample_pooling_params (Dict[str, Any]): Parameters for the above pooling
module.
discriminator_params (Dict[str, Any]): Parameters for hifi-gan scale
discriminator module.
follow_official_norm (bool): Whether to follow the norm setting of the
official implementation. The first discriminator uses spectral norm
and the other discriminators use weight norm.
"""
super().__init__()
self.discriminators = torch.nn.ModuleList()
# add discriminators
for i in range(scales):
params = copy.deepcopy(discriminator_params)
if follow_official_norm:
if i == 0:
params["use_weight_norm"] = False
params["use_spectral_norm"] = True
else:
params["use_weight_norm"] = True
params["use_spectral_norm"] = False
self.discriminators += [HiFiGANScaleDiscriminator(**params)]
self.pooling = None
if scales > 1:
self.pooling = getattr(torch.nn, downsample_pooling)(
**downsample_pooling_params
)
def forward(self, x: torch.Tensor) -> List[List[torch.Tensor]]:
"""Calculate forward propagation.
Args:
x (Tensor): Input noise signal (B, 1, T).
Returns:
            List[List[torch.Tensor]]: List of lists of each discriminator's outputs,
                each consisting of the per-layer output tensors.
"""
outs = []
for f in self.discriminators:
outs += [f(x)]
if self.pooling is not None:
x = self.pooling(x)
return outs
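# Note on the output structure (following the defaults above): with scales=3, forward returns a
# list of three lists, one per scale discriminator; the raw input goes to the first discriminator
# and is average-pooled (kernel 4, stride 2) between successive scales.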
class HiFiGANMultiScaleMultiPeriodDiscriminator(torch.nn.Module):
"""HiFi-GAN multi-scale + multi-period discriminator module."""
def __init__(
self,
# Multi-scale discriminator related
scales: int = 3,
scale_downsample_pooling: str = "AvgPool1d",
scale_downsample_pooling_params: Dict[str, Any] = {
"kernel_size": 4,
"stride": 2,
"padding": 2,
},
scale_discriminator_params: Dict[str, Any] = {
"in_channels": 1,
"out_channels": 1,
"kernel_sizes": [15, 41, 5, 3],
"channels": 128,
"max_downsample_channels": 1024,
"max_groups": 16,
"bias": True,
"downsample_scales": [2, 2, 4, 4, 1],
"nonlinear_activation": "LeakyReLU",
"nonlinear_activation_params": {"negative_slope": 0.1},
},
follow_official_norm: bool = True,
# Multi-period discriminator related
periods: List[int] = [2, 3, 5, 7, 11],
period_discriminator_params: Dict[str, Any] = {
"in_channels": 1,
"out_channels": 1,
"kernel_sizes": [5, 3],
"channels": 32,
"downsample_scales": [3, 3, 3, 3, 1],
"max_downsample_channels": 1024,
"bias": True,
"nonlinear_activation": "LeakyReLU",
"nonlinear_activation_params": {"negative_slope": 0.1},
"use_weight_norm": True,
"use_spectral_norm": False,
},
):
"""Initilize HiFiGAN multi-scale + multi-period discriminator module.
Args:
scales (int): Number of multi-scales.
scale_downsample_pooling (str): Pooling module name for downsampling of the
inputs.
scale_downsample_pooling_params (dict): Parameters for the above pooling
module.
scale_discriminator_params (dict): Parameters for hifi-gan scale
discriminator module.
follow_official_norm (bool): Whether to follow the norm setting of the
                official implementation. The first discriminator uses spectral norm and
the other discriminators use weight norm.
periods (list): List of periods.
period_discriminator_params (dict): Parameters for hifi-gan period
discriminator module. The period parameter will be overwritten.
"""
super().__init__()
self.msd = HiFiGANMultiScaleDiscriminator(
scales=scales,
downsample_pooling=scale_downsample_pooling,
downsample_pooling_params=scale_downsample_pooling_params,
discriminator_params=scale_discriminator_params,
follow_official_norm=follow_official_norm,
)
self.mpd = HiFiGANMultiPeriodDiscriminator(
periods=periods,
discriminator_params=period_discriminator_params,
)
def forward(self, x: torch.Tensor) -> List[List[torch.Tensor]]:
"""Calculate forward propagation.
Args:
            x (Tensor): Input signal (B, 1, T).
Returns:
            List[List[Tensor]]: List of lists of each discriminator's outputs,
                each consisting of the per-layer output tensors. The multi-scale
                and multi-period outputs are concatenated.
"""
msd_outs = self.msd(x)
mpd_outs = self.mpd(x)
return msd_outs + mpd_outs
| [
"torch.nn.ConvTranspose1d",
"torch.nn.ModuleList",
"torch.nn.Conv1d",
"torch.nn.utils.remove_weight_norm",
"torch.nn.Tanh",
"torch.nn.LeakyReLU",
"torch.nn.utils.weight_norm",
"torch.nn.Conv2d",
"torch.nn.utils.spectral_norm",
"torch.flatten",
"torch.nn.functional.pad"
] | 1.1.0 | Gold-Sea/espnet | f31fc9a832801ef725a003e97ab7a5496046dc66 |
1.2 | import torch.nn as nn
class Actor(nn.Module):
"""Actor neural network.
Parameters
----------
nn : class
Torch neural network class
Returns
-------
array 1-D
An array containing the action for a given state
"""
def __init__(self, n_states, n_actions, config):
super(Actor, self).__init__()
self.net = nn.Sequential(nn.Linear(n_states, 128), nn.Linear(128, 128),
nn.Linear(128, 128),
nn.Linear(128, n_actions))
def forward(self, state):
action = self.net(state)
return action
class Critic(nn.Module):
"""A critic neural network.
Parameters
----------
nn : class
Torch neural network class
Returns
-------
float
The value of a given state
"""
def __init__(self, n_states, config):
super(Critic, self).__init__()
self.net = nn.Sequential(nn.Linear(n_states, 128), nn.Linear(128, 128),
nn.Linear(128, 128), nn.Linear(128, 1))
def forward(self, state):
value = self.net(state)
return value
| [
"torch.nn.Linear"
] | 1.2.0 | janiapurv/swarm-reinforcement-learning | 118c952f96e0c6cc093996fa1c76e7cc8a544bad |
1.6 | # author: github/zabir-nabil
# relevant imports
import os
import cv2
import pydicom
import pandas as pd
import numpy as np
# import tensorflow as tf
# import matplotlib.pyplot as plt
# torch dataset
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import random
from tqdm import tqdm
# k-fold
from sklearn.model_selection import KFold
# hyperparam object
from config import HyperP
hyp = HyperP(model_type = "singlemodal_ct") # slope prediction
# seed
seed = hyp.seed
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
# tf.random.set_seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# path
root_path = hyp.data_folder # ../input/osic-pulmonary-fibrosis-progression
train = pd.read_csv(f'{root_path}/train.csv')
train_vol = pd.read_csv(f'{hyp.ct_tab_feature_csv}')
train['Volume'] = 2000.
for i in range(len(train)):
pid = train.iloc[i]['Patient']
try:
train.at[i, 'Volume'] = train_vol[train_vol['Patient']==pid].iloc[0]['Volume']
except:
print('bug at volume')
# tabular feature generation
def get_tab(df):
    # NOTE: this early return passes a zero vector, so no tabular features are encoded and
    # the feature-construction code below is left in place but never reached.
    return np.zeros(5, dtype=np.float)
vector = [(df.Age.values[0] - train.Age.values.mean()) / train.Age.values.std()] # df.Age.values[0].mean(), df.Age.values[0].std()
if df.Sex.values[0] == 'Male':
vector.append(0)
else:
vector.append(1)
if df.SmokingStatus.values[0] == 'Never smoked':
vector.extend([0,0])
elif df.SmokingStatus.values[0] == 'Ex-smoker':
vector.extend([1,1])
elif df.SmokingStatus.values[0] == 'Currently smokes':
vector.extend([0,1])
else:
vector.extend([1,0]) # this is useless
vector.append((df.Volume.values[0] - train.Volume.values.mean()) / train.Volume.values.std())
return np.array(vector)
A = {} # the slopes
TAB = {} # tabular features
P = [] # patient IDs
for i, p in tqdm(enumerate(train.Patient.unique())):
sub = train.loc[train.Patient == p, :]
fvc = sub.FVC.values
weeks = sub.Weeks.values
c = np.vstack([weeks, np.ones(len(weeks))]).T
a, _ = np.linalg.lstsq(c, fvc)[0] # we calculate the best slope with least square
# ref: https://numpy.org/doc/stable/reference/generated/numpy.linalg.lstsq.html
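    # A small worked example (hypothetical numbers, not taken from the data): for a patient
    # measured at weeks [0, 4, 8] with FVC [2500, 2450, 2400], the design matrix is
    #     c = [[0., 1.], [4., 1.], [8., 1.]]
    # and np.linalg.lstsq(c, fvc)[0] returns roughly [-12.5, 2500.0], i.e. a slope of about
    # -12.5 FVC units lost per week and an intercept of 2500.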
A[p] = a
TAB[p] = get_tab(sub)
P.append(p)
class OSICData(Dataset):
BAD_ID = ['ID00011637202177653955184', 'ID00052637202186188008618']
def __init__(self, keys, a, tab):
self.keys = [k for k in keys if k not in self.BAD_ID]
self.a = a
self.tab = tab
self.train_data = {}
for p in train.Patient.values:
p_n = len(os.listdir(f'{root_path}/train/{p}/'))
self.train_data[p] = os.listdir(f'{root_path}/train/{p}/')[int( hyp.strip_ct * p_n):-int( hyp.strip_ct * p_n)] # removing first and last 15% slices
def __len__(self):
return len(self.keys)
def get_img(self, path):
d = pydicom.dcmread(path)
return cv2.resize(d.pixel_array / 2**11, (512, 512)) # maybe bug in resize
def __getitem__(self, idx):
x = []
a, tab = [], []
k = self.keys[idx] # instead of random id send a specific id
# np.random.choice(self.keys, 1)[0]
try:
i = np.random.choice(self.train_data[k], size=1)[0]
# print(i)
cp = f'{root_path}/train/{k}/{i}'
#print(cp)
img = self.get_img(cp)
x.append(img)
a.append(self.a[k])
tab.append(self.tab[k])
except:
print('failed')
print(k, i)
x, a, tab = torch.tensor(x, dtype=torch.float32), torch.tensor(a, dtype=torch.float32), torch.tensor(tab, dtype=torch.float32)
tab = torch.squeeze(tab, axis=0)
return [x, tab] , a, k # k for patient id
from torchvision import models
from torch import nn
from efficientnet_pytorch import EfficientNet
from efficientnet_pytorch.utils import Conv2dStaticSamePadding
class Identity(nn.Module):
# credit: ptrblck
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
class Self_Attn(nn.Module):
""" Self attention Layer"""
def __init__(self,in_dim):
super(Self_Attn,self).__init__()
self.chanel_in = in_dim
self.query_conv = nn.Conv2d(in_channels = in_dim , out_channels = in_dim//8 , kernel_size= 1)
self.key_conv = nn.Conv2d(in_channels = in_dim , out_channels = in_dim//8 , kernel_size= 1)
self.value_conv = nn.Conv2d(in_channels = in_dim , out_channels = in_dim , kernel_size= 1)
self.gamma = nn.Parameter(torch.rand(1)) # random initialization
self.softmax = nn.Softmax(dim=-1) #
def forward(self,x):
"""
inputs :
x : input feature maps( B X C X W X H)
returns :
out : self attention value + input feature
attention: B X N X N (N is Width*Height)
"""
m_batchsize,C,width ,height = x.size()
proj_query = self.query_conv(x).view(m_batchsize,-1,width*height).permute(0,2,1) # B X CX(N)
proj_key = self.key_conv(x).view(m_batchsize,-1,width*height) # B X C x (*W*H)
energy = torch.bmm(proj_query,proj_key) # transpose check
attention = self.softmax(energy) # BX (N) X (N)
proj_value = self.value_conv(x).view(m_batchsize,-1,width*height) # B X C X N
out = torch.bmm(proj_value,attention.permute(0,2,1) )
out = out.view(m_batchsize,C,width,height)
out = self.gamma*out + x
return out # , attention
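# In summary, Self_Attn implements SAGAN-style self-attention: with A = softmax(Q^T K) computed
# from 1x1-conv projections Q, K (C//8 channels each) and V (C channels), the layer returns
# gamma * (V A^T) + x, where gamma is the learned scalar initialised randomly above.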
# only based on best config of b2
class TabCT(nn.Module):
def __init__(self, cnn, attn_filters, fc_dim, n_attn_layers):
super(TabCT, self).__init__()
# CT features
cnn_dict = {'resnet18': models.resnet18, 'resnet34': models.resnet34, 'resnet50': models.resnet50,
'resnet101': models.resnet101, 'resnet152': models.resnet152, 'resnext50': models.resnext50_32x4d,
'resnext101': models.resnext101_32x8d}
# feature dim
self.out_dict = {'resnet18': 512, 'resnet34': 512, 'resnet50': 2048, 'resnet101': 2048, 'resnet152': 2048,
'resnext50': 2048, 'resnext101': 2048, "efnb0": 1280, "efnb1": 1280, "efnb2": 1408,
"efnb3": 1536, "efnb4": 1792, "efnb5": 2048, "efnb6": 2304, "efnb7": 2560}
self.n_tab = hyp.n_tab # n tabular features
self.attn_filters = attn_filters
self.fc_dim = fc_dim
self.n_attn_layers = n_attn_layers
# efficient net b2 base
if cnn in cnn_dict.keys():
self.ct_cnn = cnn_dict[cnn](pretrained = True)
# make single channel
self.ct_cnn.conv1 = nn.Conv2d(1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
self.ct_cnn.avgpool = nn.Conv2d(self.out_dict[cnn], self.attn_filters, kernel_size=(1, 1), bias=False)
self.ct_cnn.fc = nn.Identity()
# 1 self attn layer [stacked]
self.attn = nn.ModuleList()
for _ in range(self.n_attn_layers):
self.attn.append(Self_Attn(self.attn_filters))
elif 'efn' in cnn:
if 'b0' in cnn:
self.ct_cnn = EfficientNet.from_pretrained('efficientnet-b0')
self.ct_cnn._conv_stem = Conv2dStaticSamePadding(1, 32, kernel_size = (3,3), stride = (2,2),
bias = False, image_size = 512)
elif 'b1' in cnn:
self.ct_cnn = EfficientNet.from_pretrained('efficientnet-b1')
self.ct_cnn._conv_stem = Conv2dStaticSamePadding(1, 32, kernel_size = (3,3), stride = (2,2),
bias = False, image_size = 512)
elif 'b2' in cnn:
self.ct_cnn = EfficientNet.from_pretrained('efficientnet-b2')
self.ct_cnn._conv_stem = Conv2dStaticSamePadding(1, 32, kernel_size = (3,3), stride = (2,2),
bias = False, image_size = 512)
elif 'b3' in cnn:
self.ct_cnn = EfficientNet.from_pretrained('efficientnet-b3')
self.ct_cnn._conv_stem = Conv2dStaticSamePadding(1, 40, kernel_size = (3,3), stride = (2,2),
bias = False, image_size = 512)
elif 'b4' in cnn:
self.ct_cnn = EfficientNet.from_pretrained('efficientnet-b4')
self.ct_cnn._conv_stem = Conv2dStaticSamePadding(1, 48, kernel_size = (3,3), stride = (2,2),
bias = False, image_size = 512)
# replace avg_pool layer
# 1408 is the number of filters in last conv
self.ct_cnn._avg_pooling = Conv2dStaticSamePadding(self.out_dict[ cnn.split('_')[0] ], self.attn_filters, kernel_size = (1,1), stride = (1,1),
bias = False, image_size = 512)
self.ct_cnn._dropout = nn.Identity()
self.ct_cnn._fc = nn.Identity()
self.ct_cnn._swish = nn.Identity()
# 1 self attn layer [stacked]
self.attn = nn.ModuleList()
for _ in range(self.n_attn_layers):
self.attn.append(Self_Attn(self.attn_filters))
else:
raise ValueError("cnn not recognized")
self.avgpool = nn.AdaptiveAvgPool2d((1,1))
self.dropout = nn.Dropout(p=0.25)
self.fc_inter = nn.Linear(self.attn_filters + self.n_tab, self.fc_dim)
self.fc = nn.Linear(self.fc_dim, 1)
def forward(self, x_ct, x_tab):
print(x_ct.shape)
ct_f = self.ct_cnn(x_ct).view(-1, self.attn_filters, 16, 16) # ct features
#print(ct_f.shape)
for ii in range(len(self.attn)):
ct_f = self.attn[ii](ct_f)
#print(ct_f.shape)
ct_f = self.avgpool(ct_f).view(-1, self.attn_filters)
#print(ct_f.shape)
# print(x_tab.shape)
# concatenate
x = torch.cat((ct_f, x_tab), -1) # concat on last axis
# dropout
if self.training:
x = self.dropout(x)
x = self.fc_inter(x)
x = self.fc(x)
return x
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
# score calculation
def score(fvc_true, fvc_pred, sigma):
sigma_clip = np.maximum(sigma, 70)
delta = np.abs(fvc_true - fvc_pred)
delta = np.minimum(delta, 1000)
sq2 = np.sqrt(2)
metric = (delta / sigma_clip)*sq2 + np.log(sigma_clip* sq2)
return np.mean(metric)
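# Hedged worked example of the metric above (hypothetical values): with fvc_true=2500,
# fvc_pred=2400 and sigma=100, delta=100, so the per-point metric is
#     (100 / 100) * sqrt(2) + ln(100 * sqrt(2)) ~= 1.414 + 4.952 ~= 6.37,
# with sigma clipped to at least 70 and delta capped at 1000 before the computation.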
def score_avg(p, a): # patient id, predicted a
percent_true = train.Percent.values[train.Patient == p]
fvc_true = train.FVC.values[train.Patient == p]
weeks_true = train.Weeks.values[train.Patient == p]
fvc = a * (weeks_true - weeks_true[0]) + fvc_true[0]
percent = percent_true[0] - a * abs(weeks_true - weeks_true[0])
return score(fvc_true, fvc, percent)
def rmse_avg(p, a): # patient id, predicted a
percent_true = train.Percent.values[train.Patient == p]
fvc_true = train.FVC.values[train.Patient == p]
weeks_true = train.Weeks.values[train.Patient == p]
fvc = a * (weeks_true - weeks_true[0]) + fvc_true[0]
return mean_squared_error(fvc_true, fvc, squared = False)
# hyperparams
result_dir = hyp.results_dir
# training only resnet models on gpu 0
train_models = hyp.train_models
# 'resnext101' -> seems too heavy for 1080
# 'efnb0', 'efnb1', 'efnb2', 'efnb3', 'efnb4', 'efnb5', 'efnb6', 'efnb7'
# device
gpu = torch.device(f"cuda:{hyp.gpu_index}" if torch.cuda.is_available() else "cpu")
nfold = hyp.nfold # hyper
# removing noisy data
P = [p for p in P if p not in ['ID00011637202177653955184', 'ID00052637202186188008618']]
for model in train_models:
for fd in hyp.fc_dim:
for af in hyp.attn_filters:
for nal in hyp.n_attn_layers:
log = open(f"{result_dir}/{model}_fd_{fd}_af_{af}_nal_{nal}.txt", "a+")
                kfold = KFold(n_splits=nfold)
ifold = 0
for train_index, test_index in kfold.split(P):
# print(train_index, test_index)
p_train = np.array(P)[train_index]
p_test = np.array(P)[test_index]
osic_train = OSICData(p_train, A, TAB)
train_loader = DataLoader(osic_train, batch_size=hyp.batch_size, shuffle=True, num_workers=hyp.num_workers)
osic_val = OSICData(p_test, A, TAB)
val_loader = DataLoader(osic_val, batch_size=hyp.batch_size, shuffle=True, num_workers=hyp.num_workers)
tabct = TabCT(cnn = model, fc_dim = fd, attn_filters = af, n_attn_layers = nal).to(gpu)
print(f"creating {model} with {fd} feature_dim, {af} attn_filters, and {nal} n_attn_layers")
print(f"fold: {ifold}")
log.write(f"fold: {ifold}\n")
ifold += 1
n_epochs = hyp.n_epochs # max 30 epochs, patience 5, find the suitable epoch number for later final training
best_epoch = n_epochs # 30
optimizer = torch.optim.AdamW(tabct.parameters())
criterion = torch.nn.L1Loss()
                    max_score = 99999999.0  # despite the name, max_score tracks the best (minimum) score
for epoch in range(n_epochs): # loop over the dataset multiple times
running_loss = 0.0
tabct.train()
for i, data in tqdm(enumerate(train_loader, 0)):
[x, t], a, _ = data
print(x.shape)
print(t.shape)
x = x.to(gpu)
t = t.to(gpu)
a = a.to(gpu)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = tabct(x, t)
loss = criterion(outputs, a)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
print(f"epoch {epoch+1} train: {running_loss}")
log.write(f"epoch {epoch+1} train: {running_loss}\n")
running_loss = 0.0
pred_a = {}
tabct.eval()
for i, data in tqdm(enumerate(val_loader, 0)):
[x, t], a, pid = data
x = x.to(gpu)
t = t.to(gpu)
a = a.to(gpu)
# forward
outputs = tabct(x, t)
loss = criterion(outputs, a)
pids = pid
preds_a = outputs.detach().cpu().numpy().flatten()
for j, p_d in enumerate(pids):
pred_a[p_d] = preds_a[j]
# print statistics
running_loss += loss.item()
print(f"epoch {epoch+1} val: {running_loss}")
log.write(f"epoch {epoch+1} val: {running_loss}\n")
# score calculation
print(pred_a)
print(len(pred_a))
print(p_test)
print(len(p_test))
score_v = 0.
rmse = 0.
for p in p_test:
score_v += (score_avg(p, pred_a[p]))/len(p_test)
rmse += (rmse_avg(p, pred_a[p]))/len(p_test)
print(f"val score: {score_v}")
log.write(f"val score: {score_v}\n")
log.write(f"val rmse: {rmse}\n")
if score_v <= max_score:
torch.save({
'epoch': epoch,
'model_state_dict': tabct.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'score': score_v
}, f"{result_dir}/{model}_fd_{fd}_af_{af}_nal_{nal}.tar")
max_score = score_v
best_epoch = epoch + 1
# destroy model
del tabct
torch.cuda.empty_cache()
# final training with optimized setting
osic_all = OSICData(P, A, TAB)
all_loader = DataLoader(osic_all, batch_size=8, shuffle=True, num_workers=hyp.num_workers)
# load the best model
tabct = TabCT(cnn = model, fc_dim = fd, attn_filters = af, n_attn_layers = nal).to(gpu)
tabct.load_state_dict(torch.load(f"{result_dir}/{model}_fd_{fd}_af_{af}_nal_{nal}.tar")["model_state_dict"])
optimizer = torch.optim.AdamW(tabct.parameters(), lr = hyp.final_lr) # very small learning rate
criterion = torch.nn.L1Loss()
print(f"Final training")
log.write(f"Final training\n")
for epoch in range(best_epoch + 2): # loop over the dataset multiple times
running_loss = 0.0
tabct.train()
for i, data in tqdm(enumerate(all_loader, 0)):
[x, t], a, _ = data
x = x.to(gpu)
t = t.to(gpu)
a = a.to(gpu)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = tabct(x, t)
loss = criterion(outputs, a)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
print(f"epoch {epoch+1} train: {running_loss}")
log.write(f"epoch {epoch+1} train: {running_loss}\n")
torch.save({
'epoch': best_epoch,
'model_state_dict': tabct.state_dict(),
'optimizer_state_dict': optimizer.state_dict()
}, f"{result_dir}/{model}.tar")
print('Finished Training')
# destroy model
del tabct
torch.cuda.empty_cache()
# ref: https://www.kaggle.com/miklgr500/linear-decay-based-on-resnet-cnn
# https://pytorch.org/docs/stable/index.html | [
"torch.nn.Linear",
"torch.cat",
"torch.nn.ModuleList",
"torch.bmm",
"torch.squeeze",
"torch.cuda.is_available",
"torch.load",
"torch.nn.Softmax",
"torch.manual_seed",
"torch.tensor",
"torch.utils.data.DataLoader",
"torch.nn.Identity",
"torch.nn.Conv2d",
"torch.cuda.empty_cache",
"torch.rand",
"torch.nn.Dropout",
"torch.nn.L1Loss",
"torch.nn.AdaptiveAvgPool2d"
] | 1.6.0 | greenFantasy/Fibro-CoSANet | d7b472a93ae70a7355b221a820d0254c590fdf03 |
1.7 | from argparse import ArgumentParser
from typing import Tuple
from warnings import warn
import torch
import torch.nn as nn
from torch import Tensor
from lasaft.data.musdb_wrapper import SingleTrackSet
from lasaft.source_separation.conditioned.separation_framework import Spectrogram_based
from lasaft.utils.functions import get_activation_by_name, string_to_list
class Dense_CUNet(nn.Module):
def __init__(self,
n_fft,
input_channels, internal_channels,
n_blocks, n_internal_layers,
mk_block_f, mk_ds_f, mk_us_f,
first_conv_activation, last_activation,
t_down_layers, f_down_layers,
# Conditional Mechanism #
control_vector_type, control_input_dim, embedding_dim, condition_to
):
first_conv_activation = get_activation_by_name(first_conv_activation)
last_activation = get_activation_by_name(last_activation)
super(Dense_CUNet, self).__init__()
        '''n_blocks should be an odd integer'''
assert n_blocks % 2 == 1
dim_f, t_down_layers, f_down_layers = self.mk_overall_structure(n_fft, internal_channels, input_channels,
n_blocks,
n_internal_layers, last_activation,
first_conv_activation,
t_down_layers, f_down_layers)
self.mk_blocks(dim_f, internal_channels, mk_block_f, mk_ds_f, mk_us_f, t_down_layers)
#########################
# Conditional Mechanism #
#########################
assert control_vector_type in ['one_hot_mode', 'embedding']
if control_vector_type == 'one_hot_mode':
if control_input_dim != embedding_dim:
warn('in one_hot_mode, embedding_dim should be the same as num_targets. auto correction')
embedding_dim = control_input_dim
with torch.no_grad():
one_hot_weight = torch.zeros((control_input_dim, embedding_dim))
for i in range(control_input_dim):
one_hot_weight[i, i] = 1.
self.embedding = nn.Embedding(control_input_dim, embedding_dim, _weight=one_hot_weight)
self.embedding.weight.requires_grad = True
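            # For example (values assumed for illustration): with control_input_dim = 4, the
            # embedding weight is initialised to the 4x4 identity, so input_condition = 2
            # selects the one-hot vector [0., 0., 1., 0.] before it reaches the condition generator.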
elif control_vector_type == 'embedding':
self.embedding = nn.Embedding(control_input_dim, embedding_dim)
# Where to condition
assert condition_to in ['encoder', 'decoder', 'full']
self.is_encoder_conditioned = self.is_middle_conditioned = self.is_decoder_conditioned = False
if condition_to == 'encoder':
self.is_encoder_conditioned = True
elif condition_to == 'decoder':
self.is_decoder_conditioned = True
elif condition_to == 'full':
self.is_encoder_conditioned = self.is_middle_conditioned = self.is_decoder_conditioned = True
else:
raise NotImplementedError
self.activation = self.last_conv[-1]
def mk_blocks(self, dim_f, internal_channels, mk_block_f, mk_ds_f, mk_us_f, t_down_layers):
f = dim_f
for i in range(self.n):
self.encoders.append(mk_block_f(internal_channels, internal_channels, f))
ds_layer, f = mk_ds_f(internal_channels, i, f, t_down_layers)
self.downsamplings.append(ds_layer)
self.mid_block = mk_block_f(internal_channels, internal_channels, f)
for i in range(self.n):
us_layer, f = mk_us_f(internal_channels, i, f, self.n, t_down_layers)
self.upsamplings.append(us_layer)
self.decoders.append(mk_block_f(2 * internal_channels, internal_channels, f))
def mk_overall_structure(self, n_fft, internal_channels, input_channels, n_blocks, n_internal_layers,
last_activation, first_conv_activation, t_down_layers, f_down_layers):
dim_f = n_fft // 2
input_channels = input_channels
self.first_conv = nn.Sequential(
nn.Conv2d(
in_channels=input_channels,
out_channels=internal_channels,
kernel_size=(1, 2),
stride=1
),
nn.BatchNorm2d(internal_channels),
first_conv_activation(),
)
self.encoders = nn.ModuleList()
self.downsamplings = nn.ModuleList()
self.decoders = nn.ModuleList()
self.upsamplings = nn.ModuleList()
self.last_conv = nn.Sequential(
nn.Conv2d(
in_channels=internal_channels,
out_channels=input_channels,
kernel_size=(1, 2),
stride=1,
padding=(0, 1)
),
last_activation()
)
self.n = n_blocks // 2
if t_down_layers is None:
t_down_layers = list(range(self.n))
elif n_internal_layers == 'None':
t_down_layers = list(range(self.n))
else:
t_down_layers = string_to_list(t_down_layers)
if f_down_layers is None:
f_down_layers = list(range(self.n))
elif n_internal_layers == 'None':
f_down_layers = list(range(self.n))
else:
f_down_layers = string_to_list(f_down_layers)
return dim_f, t_down_layers, f_down_layers
def forward(self, input_spec, input_condition):
condition_embedding = self.embedding(input_condition)
gammas, betas = self.condition_generator(condition_embedding)
x = self.first_conv(input_spec)
encoding_outputs = []
for encoder, downsampling, gamma, beta in zip(self.encoders, self.downsamplings, gammas, betas):
x = encoder(x)
x = self.film(x, gamma, beta)
encoding_outputs.append(x)
x = downsampling(x)
x = self.mid_block(x)
for i in range(self.n):
x = self.upsamplings[i](x)
x = torch.cat((x, encoding_outputs[-i - 1]), 1)
x = self.decoders[i](x)
return self.last_conv(x)
class Dense_CUNet_Framework(Spectrogram_based):
def __init__(self, n_fft, hop_length, num_frame,
spec_type, spec_est_mode,
spec2spec,
optimizer, lr, train_loss, val_loss
):
super(Dense_CUNet_Framework, self).__init__(
n_fft, hop_length, num_frame,
spec_type, spec_est_mode,
spec2spec,
optimizer, lr, train_loss, val_loss
)
def to_spec(self, input_signal) -> torch.Tensor:
if self.magnitude_based:
return self.stft.to_mag(input_signal).transpose(-1, -3)
else:
spec_complex = self.stft.to_spec_complex(input_signal) # *, N, T, 2, ch
spec_complex = torch.flatten(spec_complex, start_dim=-2) # *, N, T, 2ch
return spec_complex.transpose(-1, -3) # *, 2ch, T, N
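    # Shape sketch for the complex-spectrogram branch (assuming stereo audio, ch=2):
    # spec_complex is (*, N, T, 2, 2) with the real/imag pair in dim -2, so flattening the
    # last two dims gives 4 "channels" per time-frequency bin, matching the parser default
    # of --input_channels 4 below.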
def forward(self, input_signal, input_condition) -> torch.Tensor:
input_spec = self.to_spec(input_signal)
output_spec = self.spec2spec(input_spec, input_condition)
if self.masking_based:
output_spec = input_spec * output_spec
return output_spec
def separate(self, input_signal, input_condition) -> torch.Tensor:
phase = None
if self.magnitude_based:
mag, phase = self.stft.to_mag_phase(input_signal)
input_spec = mag.transpose(-1, -3)
else:
spec_complex = self.stft.to_spec_complex(input_signal) # *, N, T, 2, ch
spec_complex = torch.flatten(spec_complex, start_dim=-2) # *, N, T, 2ch
input_spec = spec_complex.transpose(-1, -3) # *, 2ch, T, N
output_spec = self.spec2spec(input_spec, input_condition)
if self.masking_based:
output_spec = input_spec * output_spec
else:
pass # Use the original output_spec
output_spec = output_spec.transpose(-1, -3)
if self.magnitude_based:
restored = self.stft.restore_mag_phase(output_spec, phase)
else:
# output_spec: *, N, T, 2ch
output_spec = output_spec.view(list(output_spec.shape)[:-1] + [2, -1]) # *, N, T, 2, ch
restored = self.stft.restore_complex(output_spec)
return restored
def separate_and_return_spec(self, input_signal, input_condition) -> Tuple[Tensor, Tensor]:
phase = None
if self.magnitude_based:
mag, phase = self.stft.to_mag_phase(input_signal)
input_spec = mag.transpose(-1, -3)
else:
spec_complex = self.stft.to_spec_complex(input_signal) # *, N, T, 2, ch
spec_complex = torch.flatten(spec_complex, start_dim=-2) # *, N, T, 2ch
input_spec = spec_complex.transpose(-1, -3) # *, 2ch, T, N
output_spec = self.spec2spec(input_spec, input_condition)
if self.masking_based:
output_spec = input_spec * output_spec
else:
pass # Use the original output_spec
output_spec_cache = output_spec
output_spec = output_spec.transpose(-1, -3)
if self.magnitude_based:
restored = self.stft.restore_mag_phase(output_spec, phase)
else:
# output_spec: *, N, T, 2ch
output_spec = output_spec.view(list(output_spec.shape)[:-1] + [2, -1]) # *, N, T, 2, ch
restored = self.stft.restore_complex(output_spec)
return restored, output_spec_cache
def separate_track(self, input_signal, target) -> torch.Tensor:
import numpy as np
self.eval()
with torch.no_grad():
db = SingleTrackSet(input_signal, self.hop_length, self.num_frame)
assert target in db.source_names
separated = []
input_condition = np.array(db.source_names.index(target))
input_condition = torch.tensor(input_condition, dtype=torch.long, device=self.device).view(1)
for item in db:
separated.append(self.separate(item.unsqueeze(0).to(self.device), input_condition)[0]
[self.trim_length:-self.trim_length].detach().cpu().numpy())
separated = np.concatenate(separated, axis=0)
import soundfile
soundfile.write('temp.wav', separated, 44100)
return soundfile.read('temp.wav')[0]
@staticmethod
def add_model_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument('--n_blocks', type=int, default=7)
parser.add_argument('--input_channels', type=int, default=4)
parser.add_argument('--internal_channels', type=int, default=24)
parser.add_argument('--first_conv_activation', type=str, default='relu')
parser.add_argument('--last_activation', type=str, default='identity')
parser.add_argument('--t_down_layers', type=tuple, default=None)
parser.add_argument('--f_down_layers', type=tuple, default=None)
parser.add_argument('--control_vector_type', type=str, default='embedding')
parser.add_argument('--control_input_dim', type=int, default=4)
parser.add_argument('--embedding_dim', type=int, default=32)
parser.add_argument('--condition_to', type=str, default='decoder')
return Spectrogram_based.add_model_specific_args(parser)
| [
"torch.zeros",
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.BatchNorm2d",
"torch.no_grad",
"torch.nn.Conv2d",
"torch.tensor",
"torch.flatten",
"torch.nn.Embedding"
] | 1.7.1 | YeongSeokJeong/Conditioned-Source-Separation-LaSAFT | fc06d200de0f369d447b247ca007c686583ec87a |
1.7 | """recur_cnn.py
Recurrent CNN models.
"""
import torch
import torch.nn as nn
class RecurCNN(nn.Module):
def __init__(self, num_outputs, width, depth, in_channels):
super().__init__()
self.num_outputs = num_outputs
self.width = width
self.iters = depth - 3
self.first_layers = nn.Sequential(nn.Conv2d(in_channels, int(self.width / 2),
kernel_size=3, stride=1),
nn.ReLU(),
nn.Conv2d(int(self.width / 2), self.width, kernel_size=3,
stride=1),
nn.ReLU())
self.recur_block = nn.Sequential(nn.Conv2d(self.width, self.width, kernel_size=3, stride=1,
padding=1), nn.ReLU())
self.last_layers = nn.Sequential(nn.MaxPool2d(3),
nn.Conv2d(self.width, 2*self.width, kernel_size=3,
stride=1),
nn.ReLU(),
nn.MaxPool2d(3))
self.linear = nn.Linear(8 * width, num_outputs)
def forward(self, x):
self.thoughts = torch.zeros((self.iters, x.shape[0], self.num_outputs)).to(x.device)
out = self.first_layers(x)
for i in range(self.iters):
out = self.recur_block(out)
thought = self.last_layers(out)
thought = thought.view(thought.size(0), -1)
self.thoughts[i] = self.linear(thought)
return self.thoughts[-1]
def recur_cnn(num_outputs, depth, width, dataset):
in_channels = {"CIFAR10": 3, "SVHN": 3, "EMNIST": 1}[dataset.upper()]
return RecurCNN(num_outputs, width, depth, in_channels)
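# Minimal usage sketch (shapes assumed for illustration): for 32x32 RGB CIFAR10 inputs,
#     net = recur_cnn(num_outputs=10, depth=8, width=64, dataset="CIFAR10")
#     logits = net(torch.randn(2, 3, 32, 32))
# applies the weight-tied recur_block depth - 3 = 5 times and returns the final "thought";
# the per-iteration predictions are kept in net.thoughts after the call.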
| [
"torch.nn.Linear",
"torch.zeros",
"torch.nn.MaxPool2d",
"torch.nn.ReLU",
"torch.nn.Conv2d"
] | 1.7.0 | Arjung27/DeepThinking | 13a2ce534bcb0b9379a22fffef52d975d650adb2 |
1.4 | import os
import time
import argparse
import math
from numpy import finfo
import torch
from distributed import apply_gradient_allreduce
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from model import Tacotron2
from data_utils import TextMelLoader, TextMelCollate
from loss_function import Tacotron2Loss
from logger import Tacotron2Logger
from hparams import create_hparams
from text import sequence_to_text
def reduce_tensor(tensor, n_gpus):
rt = tensor.clone()
    dist.all_reduce(rt, op=dist.reduce_op.SUM)
rt /= n_gpus
return rt
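# Sketch of the intended behaviour (hypothetical values): with n_gpus=2 and per-rank losses
# 1.0 and 3.0, all_reduce(SUM) leaves every rank holding 4.0, and dividing by n_gpus gives
# the cross-GPU mean of 2.0.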
def init_distributed(hparams, n_gpus, rank, group_name):
assert torch.cuda.is_available(), "Distributed mode requires CUDA."
print("Initializing Distributed")
# Set cuda device so everything is done on the right GPU.
torch.cuda.set_device(rank % torch.cuda.device_count())
# Initialize distributed communication
dist.init_process_group(
backend=hparams.dist_backend, init_method=hparams.dist_url,
world_size=n_gpus, rank=rank, group_name=group_name)
print("Done initializing distributed")
def prepare_dataloaders(hparams):
# Get data, data loaders and collate function ready
trainset = TextMelLoader(hparams.training_files, hparams)
valset = TextMelLoader(hparams.validation_files, hparams)
collate_fn = TextMelCollate(hparams.n_frames_per_step)
if hparams.distributed_run:
train_sampler = DistributedSampler(trainset)
shuffle = False
else:
train_sampler = None
shuffle = True
train_loader = DataLoader(trainset, num_workers=0, shuffle=shuffle,
sampler=train_sampler,
batch_size=hparams.batch_size, pin_memory=False,
drop_last=True, collate_fn=collate_fn)
return train_loader, valset, collate_fn
def prepare_directories_and_logger(output_directory, log_directory, rank):
if rank == 0:
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
os.chmod(output_directory, 0o775)
logger = Tacotron2Logger(os.path.join(output_directory, log_directory))
else:
logger = None
return logger
def load_model(hparams):
model = Tacotron2(hparams).cuda()
if hparams.fp16_run:
model.decoder.attention_layer.score_mask_value = finfo('float16').min
if hparams.distributed_run:
model = apply_gradient_allreduce(model)
return model
def warm_start_model(checkpoint_path, model, ignore_layers):
assert os.path.isfile(checkpoint_path)
print("Warm starting model from checkpoint '{}'".format(checkpoint_path))
checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
model_dict = checkpoint_dict['state_dict']
if len(ignore_layers) > 0:
model_dict = {k: v for k, v in model_dict.items() if k not in ignore_layers}
dummy_dict = model.state_dict()
dummy_dict.update(model_dict)
model_dict = dummy_dict
model.load_state_dict(model_dict)
return model
def load_checkpoint(checkpoint_path, model, optimizer):
assert os.path.isfile(checkpoint_path)
print("Loading checkpoint '{}'".format(checkpoint_path))
checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
model.load_state_dict(checkpoint_dict['state_dict'])
optimizer.load_state_dict(checkpoint_dict['optimizer'])
learning_rate = checkpoint_dict['learning_rate']
iteration = checkpoint_dict['iteration']
print("Loaded checkpoint '{}' from iteration {}" .format(
checkpoint_path, iteration))
return model, optimizer, learning_rate, iteration
def save_checkpoint(model, optimizer, learning_rate, iteration, filepath):
print("Saving model and optimizer state at iteration {} to {}".format(
iteration, filepath))
torch.save({'iteration': iteration,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
'learning_rate': learning_rate}, filepath)
def validate(model, criterion, valset, iteration, batch_size, n_gpus,
collate_fn, logger, distributed_run, rank):
"""Handles all the validation scoring and printing"""
model.eval()
with torch.no_grad():
val_sampler = DistributedSampler(valset) if distributed_run else None
val_loader = DataLoader(valset, sampler=val_sampler, num_workers=1,
shuffle=False, batch_size=batch_size,
pin_memory=False, collate_fn=collate_fn)
val_loss = 0.0
for i, batch in enumerate(val_loader):
x, y = model.parse_batch(batch)
padded_sequences = x[2].cpu().numpy()
seqs = [[v for v in padded_sequence if v != 0] for padded_sequence in padded_sequences]
txts = [sequence_to_text(txt) for txt in seqs]
y_pred = model(x)
loss = criterion(y_pred, y)
if distributed_run:
reduced_val_loss = reduce_tensor(loss.data, n_gpus).item()
else:
reduced_val_loss = loss.item()
val_loss += reduced_val_loss
print(f'validation step {i} -- loss: {loss}')
val_loss = val_loss / (i + 1)
model.train()
if rank == 0:
print("Validation loss {}: {:9f} ".format(iteration, val_loss))
logger.log_validation(val_loss, model, y, y_pred, iteration, txts)
def train(output_directory, log_directory, checkpoint_path, warm_start, n_gpus,
rank, group_name, hparams):
"""Training and validation logging results to tensorboard and stdout
Params
------
output_directory (string): directory to save checkpoints
log_directory (string) directory to save tensorboard logs
checkpoint_path(string): checkpoint path
n_gpus (int): number of gpus
rank (int): rank of current gpu
hparams (object): comma separated list of "name=value" pairs.
"""
if hparams.distributed_run:
init_distributed(hparams, n_gpus, rank, group_name)
torch.manual_seed(hparams.seed)
torch.cuda.manual_seed(hparams.seed)
model = load_model(hparams)
learning_rate = hparams.learning_rate
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate,
weight_decay=hparams.weight_decay)
if hparams.fp16_run:
from apex import amp
model, optimizer = amp.initialize(
model, optimizer, opt_level='O2')
if hparams.distributed_run:
model = apply_gradient_allreduce(model)
criterion = Tacotron2Loss()
logger = prepare_directories_and_logger(
output_directory, log_directory, rank)
train_loader, valset, collate_fn = prepare_dataloaders(hparams)
# Load checkpoint if one exists
iteration = 0
epoch_offset = 0
if checkpoint_path is not None:
if warm_start:
model = warm_start_model(checkpoint_path, model, hparams.ignore_layers)
else:
model, optimizer, _learning_rate, iteration = load_checkpoint(checkpoint_path, model, optimizer)
if hparams.use_saved_learning_rate:
learning_rate = _learning_rate
iteration += 1 # next iteration is iteration + 1
epoch_offset = max(0, int(iteration / len(train_loader)))
model.train()
is_overflow = False
import random
    # ================ MAIN TRAINING LOOP! ===================
for epoch in range(epoch_offset, hparams.epochs):
print("Epoch: {}".format(epoch))
for i, batch in enumerate(train_loader):
start = time.perf_counter()
for param_group in optimizer.param_groups:
param_group['lr'] = learning_rate
model.zero_grad()
x, y = model.parse_batch(batch)
if random.randint(0,50) == 34:
padded_sequences = x[2].cpu().numpy()
languageid = x[0].cpu().numpy()
speakerid= x[1].cpu().numpy()
seqs = [[v for v in padded_sequence if v != 0] for padded_sequence in padded_sequences]
txts = [sequence_to_text(txt) for txt in seqs]
print(f'lang: {languageid[0]}\n speakerid: {speakerid[0]}\n input_text: {txts[0]}')
y_pred = model(x)
loss = criterion(y_pred, y)
if hparams.distributed_run:
reduced_loss = reduce_tensor(loss.data, n_gpus).item()
else:
reduced_loss = loss.item()
if hparams.fp16_run:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
if hparams.fp16_run:
grad_norm = torch.nn.utils.clip_grad_norm_(
amp.master_params(optimizer), hparams.grad_clip_thresh)
is_overflow = math.isnan(grad_norm)
else:
grad_norm = torch.nn.utils.clip_grad_norm_(
model.parameters(), hparams.grad_clip_thresh)
optimizer.step()
if not is_overflow and rank == 0:
duration = time.perf_counter() - start
print("step:{} -- train lost {:.6f} -- Grad Norm {:.6f} -- speed {:.2f}s/it".format(
iteration, reduced_loss, grad_norm, duration))
logger.log_training(
reduced_loss, grad_norm, learning_rate, duration, iteration)
if not is_overflow and (iteration % hparams.iters_per_checkpoint == 0) and (iteration > 100):
validate(model, criterion, valset, iteration,
hparams.batch_size, n_gpus, collate_fn, logger,
hparams.distributed_run, rank)
if rank == 0:
checkpoint_path = os.path.join(
output_directory, "checkpoint_{}".format(iteration))
save_checkpoint(model, optimizer, learning_rate, iteration,
checkpoint_path)
iteration += 1
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--output_directory', type=str,default='outdir',
help='directory to save checkpoints')
parser.add_argument('-l', '--log_directory', type=str,default='logdir',
help='directory to save tensorboard logs')
parser.add_argument('-c', '--checkpoint_path', type=str, default=None,
required=False, help='checkpoint path')
parser.add_argument('--warm_start', action='store_true',
help='load model weights only, ignore specified layers')
parser.add_argument('--n_gpus', type=int, default=1,
required=False, help='number of gpus')
parser.add_argument('--rank', type=int, default=0,
required=False, help='rank of current gpu')
parser.add_argument('--group_name', type=str, default='group_name',
required=False, help='Distributed group name')
parser.add_argument('--hparams', type=str,
required=False, help='comma separated name=value pairs')
args = parser.parse_args()
hparams = create_hparams(args.hparams)
torch.backends.cudnn.enabled = hparams.cudnn_enabled
torch.backends.cudnn.benchmark = hparams.cudnn_benchmark
print("FP16 Run:", hparams.fp16_run)
print("Dynamic Loss Scaling:", hparams.dynamic_loss_scaling)
print("Distributed Run:", hparams.distributed_run)
print("cuDNN Enabled:", hparams.cudnn_enabled)
print("cuDNN Benchmark:", hparams.cudnn_benchmark)
train(args.output_directory, args.log_directory, args.checkpoint_path,
args.warm_start, args.n_gpus, args.rank, args.group_name, hparams)
| [
"torch.cuda.manual_seed",
"torch.distributed.init_process_group",
"torch.no_grad",
"torch.cuda.device_count",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.distributed.all_reduce",
"torch.load",
"torch.utils.data.DataLoader",
"torch.utils.data.distributed.DistributedSampler"
] | 1.4.0 | Thien223/Multilanguage_Tacotron_2 | ee93c23117b317e5f7bda95aea45bf3095893c0a |
0.1 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Callable, Dict, List, Optional, Tuple
import torch
from ax.core.types import TConfig, TGenMetadata
from ax.models.model_utils import get_observed
from ax.models.torch.botorch import BotorchModel, get_rounding_func
from ax.models.torch.utils import _get_X_pending_and_observed
from ax.models.torch_base import TorchModel
from ax.utils.common.docutils import copy_doc
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.acquisition.analytic import PosteriorMean
from botorch.acquisition.cost_aware import InverseCostWeightedUtility
from botorch.acquisition.fixed_feature import FixedFeatureAcquisitionFunction
from botorch.acquisition.max_value_entropy_search import (
qMaxValueEntropy,
qMultiFidelityMaxValueEntropy,
)
from botorch.acquisition.objective import ScalarizedObjective
from botorch.acquisition.utils import (
expand_trace_observations,
project_to_target_fidelity,
)
from botorch.exceptions.errors import UnsupportedError
from botorch.models.cost import AffineFidelityCostModel
from botorch.models.model import Model
from botorch.optim.optimize import optimize_acqf
from torch import Tensor
class MaxValueEntropySearch(BotorchModel):
r"""Max-value entropy search.
Args:
cost_intercept: The cost intercept for the affine cost of the form
`cost_intercept + n`, where `n` is the number of generated points.
            Only used for multi-fidelity optimization (i.e., if fidelity_features
are present).
linear_truncated: If `False`, use an alternate downsampling + exponential
decay Kernel instead of the default `LinearTruncatedFidelityKernel`
(only relevant for multi-fidelity optimization).
kwargs: Model-specific kwargs.
"""
def __init__(
self, cost_intercept: float = 1.0, linear_truncated: bool = True, **kwargs: Any
) -> None:
super().__init__(linear_truncated=linear_truncated, **kwargs)
self.cost_intercept = cost_intercept
@copy_doc(TorchModel.gen)
def gen(
self,
n: int,
bounds: List,
objective_weights: Tensor,
outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
fixed_features: Optional[Dict[int, float]] = None,
pending_observations: Optional[List[Tensor]] = None,
model_gen_options: Optional[TConfig] = None,
rounding_func: Optional[Callable[[Tensor], Tensor]] = None,
target_fidelities: Optional[Dict[int, float]] = None,
) -> Tuple[Tensor, Tensor, TGenMetadata]:
if linear_constraints is not None or outcome_constraints is not None:
raise UnsupportedError(
"Constraints are not yet supported by max-value entropy search!"
)
if len(objective_weights) > 1:
raise UnsupportedError(
"Models with multiple outcomes are not yet supported by MES!"
)
options = model_gen_options or {}
acf_options = options.get("acquisition_function_kwargs", {})
optimizer_options = options.get("optimizer_kwargs", {})
X_pending, X_observed = _get_X_pending_and_observed(
Xs=self.Xs,
pending_observations=pending_observations,
objective_weights=objective_weights,
outcome_constraints=outcome_constraints,
bounds=bounds,
linear_constraints=linear_constraints,
fixed_features=fixed_features,
)
# get the acquisition function
num_fantasies = acf_options.get("num_fantasies", 16)
num_mv_samples = acf_options.get("num_mv_samples", 10)
num_y_samples = acf_options.get("num_y_samples", 128)
candidate_size = acf_options.get("candidate_size", 1000)
num_restarts = optimizer_options.get("num_restarts", 40)
raw_samples = optimizer_options.get("raw_samples", 1024)
# generate the discrete points in the design space to sample max values
bounds_ = torch.tensor(bounds, dtype=self.dtype, device=self.device)
bounds_ = bounds_.transpose(0, 1)
candidate_set = torch.rand(candidate_size, bounds_.size(1))
candidate_set = bounds_[0] + (bounds_[1] - bounds_[0]) * candidate_set
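        # For instance (hypothetical bounds): with bounds = [(0., 1.), (10., 20.)], bounds_
        # becomes [[0., 10.], [1., 20.]] after the transpose, and a uniform draw of
        # [0.5, 0.5] is rescaled into the box as [0.5, 15.0].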
acq_function = _instantiate_MES(
model=self.model, # pyre-ignore: [6]
candidate_set=candidate_set,
num_fantasies=num_fantasies,
num_trace_observations=options.get("num_trace_observations", 0),
num_mv_samples=num_mv_samples,
num_y_samples=num_y_samples,
X_pending=X_pending,
maximize=True if objective_weights[0] == 1 else False,
target_fidelities=target_fidelities,
fidelity_weights=options.get("fidelity_weights"),
cost_intercept=self.cost_intercept,
)
# optimize and get new points
botorch_rounding_func = get_rounding_func(rounding_func)
candidates, _ = optimize_acqf(
acq_function=acq_function,
bounds=bounds_,
q=n,
inequality_constraints=None,
fixed_features=fixed_features,
post_processing_func=botorch_rounding_func,
num_restarts=num_restarts,
raw_samples=raw_samples,
options={
"batch_limit": optimizer_options.get("batch_limit", 8),
"maxiter": optimizer_options.get("maxiter", 200),
"method": "L-BFGS-B",
"nonnegative": optimizer_options.get("nonnegative", False),
},
sequential=True,
)
new_x = candidates.detach().cpu()
return new_x, torch.ones(n, dtype=self.dtype), {}
def best_point(
self,
bounds: List[Tuple[float, float]],
objective_weights: Tensor,
outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
fixed_features: Optional[Dict[int, float]] = None,
model_gen_options: Optional[TConfig] = None,
target_fidelities: Optional[Dict[int, float]] = None,
) -> Optional[Tensor]:
"""
Identify the current best point, satisfying the constraints in the same
format as to gen.
Return None if no such point can be identified.
Args:
bounds: A list of (lower, upper) tuples for each column of X.
objective_weights: The objective is to maximize a weighted sum of
the columns of f(x). These are the weights.
outcome_constraints: A tuple of (A, b). For k outcome constraints
and m outputs at f(x), A is (k x m) and b is (k x 1) such that
A f(x) <= b.
linear_constraints: A tuple of (A, b). For k linear constraints on
d-dimensional x, A is (k x d) and b is (k x 1) such that
A x <= b.
fixed_features: A map {feature_index: value} for features that
should be fixed to a particular value in the best point.
model_gen_options: A config dictionary that can contain
model-specific options.
target_fidelities: A map {feature_index: value} of fidelity feature
column indices to their respective target fidelities. Used for
multi-fidelity optimization.
Returns:
d-tensor of the best point.
"""
if linear_constraints is not None or outcome_constraints is not None:
raise UnsupportedError(
"Constraints are not yet supported by max-value entropy search!"
)
options = model_gen_options or {}
fixed_features = fixed_features or {}
optimizer_options = options.get("optimizer_kwargs", {})
X_observed = get_observed(
Xs=self.Xs,
objective_weights=objective_weights,
outcome_constraints=outcome_constraints,
)
acq_function, non_fixed_idcs = self._get_best_point_acqf(
X_observed=X_observed, # pyre-ignore: [6]
objective_weights=objective_weights,
fixed_features=fixed_features,
target_fidelities=target_fidelities,
)
return_best_only = optimizer_options.get("return_best_only", True)
bounds_ = torch.tensor(bounds, dtype=self.dtype, device=self.device)
bounds_ = bounds_.transpose(0, 1)
if non_fixed_idcs is not None:
bounds_ = bounds_[..., non_fixed_idcs]
candidates, _ = optimize_acqf(
acq_function=acq_function,
bounds=bounds_,
q=1,
num_restarts=optimizer_options.get("num_restarts", 60),
raw_samples=optimizer_options.get("raw_samples", 1024),
inequality_constraints=None,
fixed_features=None, # handled inside the acquisition function
options={
"batch_limit": optimizer_options.get("batch_limit", 8),
"maxiter": optimizer_options.get("maxiter", 200),
"nonnegative": optimizer_options.get("nonnegative", False),
"method": "L-BFGS-B",
},
return_best_only=return_best_only,
)
rec_point = candidates.detach().cpu()
if isinstance(acq_function, FixedFeatureAcquisitionFunction):
rec_point = acq_function._construct_X_full(rec_point)
if return_best_only:
rec_point = rec_point.view(-1)
return rec_point
def _get_best_point_acqf(
self,
X_observed: Tensor,
objective_weights: Tensor,
fixed_features: Optional[Dict[int, float]] = None,
target_fidelities: Optional[Dict[int, float]] = None,
) -> Tuple[AcquisitionFunction, Optional[List[int]]]:
fixed_features = fixed_features or {}
target_fidelities = target_fidelities or {}
objective = ScalarizedObjective(weights=objective_weights)
acq_function = PosteriorMean(
model=self.model, objective=objective # pyre-ignore: [6]
)
if self.fidelity_features:
# we need to optimize at the target fidelities
if any(f in self.fidelity_features for f in fixed_features):
raise RuntimeError("Fixed features cannot also be fidelity features")
elif not set(self.fidelity_features) == set(target_fidelities):
raise RuntimeError(
"Must provide a target fidelity for every fidelity feature"
)
# make sure to not modify fixed_features in-place
fixed_features = {**fixed_features, **target_fidelities}
elif target_fidelities:
raise RuntimeError(
"Must specify fidelity_features in fit() when using target fidelities"
)
if fixed_features:
acq_function = FixedFeatureAcquisitionFunction(
acq_function=acq_function,
d=X_observed.size(-1),
columns=list(fixed_features.keys()),
values=list(fixed_features.values()),
)
non_fixed_idcs = [
i for i in range(self.Xs[0].size(-1)) if i not in fixed_features
]
else:
non_fixed_idcs = None
return acq_function, non_fixed_idcs
def _instantiate_MES(
model: Model,
candidate_set: Tensor,
num_fantasies: int = 16,
num_mv_samples: int = 10,
num_y_samples: int = 128,
use_gumbel: bool = True,
X_pending: Optional[Tensor] = None,
maximize: bool = True,
num_trace_observations: int = 0,
target_fidelities: Optional[Dict[int, float]] = None,
fidelity_weights: Optional[Dict[int, float]] = None,
cost_intercept: float = 1.0,
) -> qMaxValueEntropy:
if target_fidelities:
if fidelity_weights is None:
fidelity_weights = {f: 1.0 for f in target_fidelities}
if not set(target_fidelities) == set(fidelity_weights):
raise RuntimeError(
"Must provide the same indices for target_fidelities "
f"({set(target_fidelities)}) and fidelity_weights "
f" ({set(fidelity_weights)})."
)
cost_model = AffineFidelityCostModel(
fidelity_weights=fidelity_weights, fixed_cost=cost_intercept
)
cost_aware_utility = InverseCostWeightedUtility(cost_model=cost_model)
def project(X: Tensor) -> Tensor:
return project_to_target_fidelity(X=X, target_fidelities=target_fidelities)
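        # For example (hypothetical target): with target_fidelities={2: 1.0}, project overwrites
        # column 2 of X with 1.0, so candidates are always evaluated at the target fidelity.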
def expand(X: Tensor) -> Tensor:
return expand_trace_observations(
X=X,
fidelity_dims=sorted(target_fidelities), # pyre-ignore: [6]
num_trace_obs=num_trace_observations,
)
return qMultiFidelityMaxValueEntropy(
model=model,
candidate_set=candidate_set,
num_fantasies=num_fantasies,
num_mv_samples=num_mv_samples,
num_y_samples=num_y_samples,
X_pending=X_pending,
maximize=maximize,
cost_aware_utility=cost_aware_utility,
project=project,
expand=expand,
)
return qMaxValueEntropy(
model=model,
candidate_set=candidate_set,
num_fantasies=num_fantasies,
num_mv_samples=num_mv_samples,
num_y_samples=num_y_samples,
X_pending=X_pending,
maximize=maximize,
)
| [
"torch.tensor",
"torch.ones"
] | 0.1.3 | xiecong/Ax | f6501807bbc6bb952d636391231ebeb10646769a |
1.7 | import random
import torch
from torch import nn
from recbole.model.abstract_recommender import SequentialRecommender
from recbole.model.loss import BPRLoss
import torch.nn.functional as F
class ComiRec_SA(SequentialRecommender):
r"""ComiRec_SA is a model that incorporate Capsule Network for recommendation.
Note:
Regarding the innovation of this article,we can only achieve the data augmentation mentioned
in the paper and directly output the embedding of the item,
in order that the generation method we used is common to other sequential models.
"""
def __init__(self, config, dataset):
super(ComiRec_SA, self).__init__(config, dataset)
# load parameters info
# self.ITEM_SEQ = self.ITEM_ID + config['LIST_SUFFIX']
# self.ITEM_SEQ_LEN = config['ITEM_LIST_LENGTH_FIELD']
# self.embedding_size = config['embedding_size']
        self.hidden_size = config['hidden_size']  # hidden_size is the embedding size
self.interest_num = config['interest_num']
self.num_heads = config['interest_num']
self.loss_type = config['loss_type']
self.hard_readout = config['hard_readout']
self.add_pos = True
        # self.add_pos = config['add_pos']  # bool, defaults to True
self.item_embedding = nn.Embedding(self.n_items, self.hidden_size, padding_idx=0)
if self.add_pos:
self.position_embedding = nn.Parameter(
                torch.Tensor(1, self.max_seq_length, self.hidden_size))  # max_seq_len is 50 in the paper
self.linear1 = nn.Sequential(
nn.Linear(self.hidden_size, self.hidden_size * 4, bias=False),
nn.Tanh()
)
self.linear2 = nn.Linear(self.hidden_size * 4, self.num_heads, bias=False)
if self.loss_type == 'BPR':
self.loss_fct = BPRLoss()
elif self.loss_type == 'CE':
self.loss_fct = nn.CrossEntropyLoss()
else:
raise NotImplementedError("Make sure 'loss_type' in ['BPR', 'CE']!")
for weight in self.parameters():
torch.nn.init.kaiming_normal_(weight)
def forward(self, item_seq):
mask = item_seq.clone()
mask[mask != 0] = 1 # batch_size * max_len
item_seq_emb = self.item_embedding(item_seq) # [batch_size, max_len, embedding_size]
item_seq_emb = item_seq_emb * torch.reshape(mask, (-1, self.max_seq_length, 1))
# mask_shape = batch_size * max_seq_len
item_seq_emb = torch.reshape(item_seq_emb, (-1, self.max_seq_length, self.hidden_size))
if self.add_pos:
            # tile the positional embedding across the batch and add it to the historical item embeddings
item_seq_emb_pos = item_seq_emb + self.position_embedding.repeat(item_seq_emb.shape[0], 1, 1)
else:
item_seq_emb_pos = item_seq_emb
# shape=(batch_size, maxlen, hidden_size*4)
item_hidden = self.linear1(item_seq_emb_pos)
# shape=(batch_size, maxlen, interest_num)
item_att_w = self.linear2(item_hidden)
# shape=(batch_size, interest_num, maxlen)
item_att_w = torch.transpose(item_att_w, 2, 1).contiguous()
# shape=(batch_size, interest_num, maxlen)
atten_mask = torch.unsqueeze(mask, dim=1).repeat(1, self.interest_num, 1)
        paddings = torch.ones_like(atten_mask, dtype=torch.float) * (-2 ** 32 + 1)  # effectively zero after softmax
item_att_w = torch.where(torch.eq(atten_mask, 0), paddings, item_att_w)
item_att_w = F.softmax(item_att_w, dim=-1)
        # attention matrix A, shape=(batch_size, interest_num, maxlen)
        '''
        torch.where(condition, x, y) -> Tensor
            condition is a boolean mask; x and y are tensors with the same shape. For each
            position, the result takes the element of x where the condition holds and the
            element of y otherwise.
        torch.eq(a, b)
            element-wise equality comparison of two tensors.
            Example: torch.eq(a, b)
            tensor([[False, False, False],
                    [False, False, False]])
        '''
        # interest_emb, i.e. the multi-interest representation from the paper
interest_emb = torch.matmul(item_att_w, item_seq_emb)
# item_att_w shape=(batch_size, interest_num, maxlen)
# item_seq_emb shape=(batch_size, maxlen, embedding_dim) embedding_dim 就是 hidden_size
# shape=(batch_size, interest_num, embedding_dim)
        # the user's multi-interest vectors
user_eb = interest_emb
# shape=(batch_size, interest_num, embedding_dim)
return user_eb
def calculate_loss(self, interaction):
        item_seq = interaction[self.ITEM_SEQ]  # this is a tensor
        item_seq_len = interaction[self.ITEM_SEQ_LEN]  # this is a tensor
        '''
        About `interaction`:
            interaction is a dict-like object, e.g.
            interaction = Interaction({'4': torch.zeros(5), '3': torch.zeros(7)})
            (Interaction lives in recbole.data.interaction). It prints as:
                The batch_size of interaction: 7
                4, torch.Size([5]), cpu, torch.float32
                3, torch.Size([7]), cpu, torch.float32
            interaction['4']  ==> same access pattern as interaction[self.ITEM_SEQ] above
            = tensor([0., 0., 0., 0., 0.])
            batch_size is the largest size along dimension 0 over all fields, here 7.
        '''
# seq_output = self.forward(item_seq, item_seq_len)
# k = random.choice((range(4, item_seq.shape[0])
# k = random.choice((range(4, len(list(item_seq)))))
seq_output = self.forward(item_seq)
        pos_items = interaction[self.POS_ITEM_ID]  # POS_ITEM_ID is item_id, i.e. the labels
if self.loss_type == 'BPR':
neg_items = interaction[self.NEG_ITEM_ID]
pos_items_emb = self.item_embedding(pos_items)
neg_items_emb = self.item_embedding(neg_items)
pos_score = torch.sum(seq_output * pos_items_emb, dim=-1) # [B]
neg_score = torch.sum(seq_output * neg_items_emb, dim=-1) # [B]
loss = self.loss_fct(pos_score, neg_score)
return loss
else: # self.loss_type = 'CE'
test_item_emb = self.item_embedding.weight
logits = torch.matmul(seq_output, test_item_emb.transpose(0, 1))
loss = self.loss_fct(logits.sum(1), pos_items)
# loss = self.loss_fct(logits, pos_items.unsqueeze(0).repeat(1683, 1).T)
return loss
def predict(self, interaction):
item_seq = interaction[self.ITEM_SEQ] # shape=(batch_size,max_len)
test_item = interaction[self.ITEM_ID]
seq_output = self.forward(item_seq)
label_eb = self.item_embedding(test_item)
atten = torch.matmul(seq_output, # shape=(batch_size, interest_num, hidden_size)
torch.reshape(label_eb, (-1, self.hidden_size, 1))
# shape=(batch_size, hidden_size, 1)
) # shape=(batch_size, interest_num, 1)
atten = F.softmax(torch.pow(torch.reshape(atten, (-1, self.interest_num)), 1),
dim=-1) # shape=(batch_size, interest_num)
        if self.hard_readout:  # pick one of the interest_num interest capsules; both MIND and ComiRec use this readout
readout = torch.reshape(seq_output, (-1, self.hidden_size))[
(torch.argmax(atten, dim=-1) + torch.arange(label_eb.shape[0]) * self.interest_num).long()]
else:  # combine all interest_num interest capsules; not used in the paper or its reference implementation
readout = torch.matmul(torch.reshape(atten, (label_eb.shape[0], 1, self.interest_num)), seq_output)
# shape=(batch_size, 1, interest_num)
# shape=(batch_size, interest_num, hidden_size)
# shape=(batch_size, 1, hidden_size)
readout = torch.reshape(readout, (seq_output.shape[0], self.hidden_size)) # shape=(batch_size, hidden_size)
# scores stacks vu for the whole batch (vu is effectively the final user embedding)
scores = torch.sum(readout * label_eb, dim=-1)  # [B]; score the readout against the test item embedding
return scores
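# Indexing note for the hard readout above (illustrative sizes): with
# batch_size=2 and interest_num=4, reshaping seq_output to (-1, hidden_size)
# lays the rows out as [b0_i0, b0_i1, b0_i2, b0_i3, b1_i0, ...]; if
# torch.argmax(atten, dim=-1) == [2, 1], then adding arange(2) * 4 gives
# indices [2, 5], i.e. capsule 2 of user 0 and capsule 1 of user 1.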
def full_sort_predict(self, interaction):  # interaction is a dict-like object
item_seq = interaction[self.ITEM_SEQ] # shape=(batch_size,max_len)
test_item = interaction[self.ITEM_ID] # batch_size * n
item_seq_len = interaction[self.ITEM_SEQ_LEN] # shape=(batch_size, seq_len)
# k = random.choice((range(4, item_seq.shape[0])
# k = random.choice(range(4, item_seq.shape[0]))
# label_seq = []
# mask = []
# if k >= item_seq_len:
# label_seq.append(list(item_seq.numpy().keys())[k - item_seq_len:k])
# mask.append(list(item_seq.numpy().keys())[[1.0] * item_seq_len])
# else:
# label_seq.append(list(item_seq.numpy().keys())[:k] + [0] * (item_seq_len - k))
# mask.append(list(item_seq.numpy().keys())[[1.0] * k + [0.0] * (item_seq_len - k)])
# label_seq = list(map(float, label_seq))
# mask = list(map(float, mask))
# label_seq = torch.tensor(label_seq)
# mask = torch.tensor(mask)
seq_output = self.forward(item_seq)
# here seq_output is user_eb
# the label is visible during training; label_eb here is the embedding of the label item
label_eb = self.item_embedding(test_item)
atten = torch.matmul(seq_output, # shape=(batch_size, interest_num, hidden_size)
torch.reshape(label_eb, (-1, self.hidden_size, 1))
# shape=(batch_size, hidden_size, 1)
) # shape=(batch_size, interest_num, 1)
atten = F.softmax(torch.pow(torch.reshape(atten, (-1, self.interest_num)), 1),
dim=-1) # shape=(batch_size, interest_num)
if self.hard_readout:  # pick one of the interest_num interest capsules; both MIND and ComiRec use this readout
readout = torch.reshape(seq_output, (-1, self.hidden_size))[
(torch.argmax(atten, dim=-1) + torch.arange(label_eb.shape[0]) * self.interest_num).long()]
else:  # combine all interest_num interest capsules; not used in the paper or its reference implementation
readout = torch.matmul(torch.reshape(atten, (label_eb.shape[0], 1, self.interest_num)), seq_output)
# shape=(batch_size, 1, interest_num)
# shape=(batch_size, interest_num, hidden_size)
# shape=(batch_size, 1, hidden_size)
readout = torch.reshape(readout, (seq_output.shape[0], self.hidden_size)) # shape=(batch_size, hidden_size)
# readout stacks vu for the whole batch (vu is effectively the final user embedding)
all_items = self.item_embedding.weight
scores = torch.matmul(readout, all_items.transpose(1, 0)) # (batch_size, n)
return scores
| [
"torch.nn.Linear",
"torch.argmax",
"torch.eq",
"torch.arange",
"torch.nn.Tanh",
"torch.nn.CrossEntropyLoss",
"torch.nn.init.kaiming_normal_",
"torch.unsqueeze",
"torch.sum",
"torch.ones_like",
"torch.nn.functional.softmax",
"torch.transpose",
"torch.Tensor",
"torch.matmul",
"torch.nn.Embedding",
"torch.reshape"
] | 1.7.0 | leelige/RecBole | 66eb93d7b6f416cd0f603a0a5cf2bef94e80f658 |
1.1 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Decoder definition."""
import logging
from typing import Any
from typing import List
from typing import Tuple
import torch
from espnet.nets.pytorch_backend.nets_utils import rename_state_dict
from espnet.nets.pytorch_backend.transformer.attention import MultiHeadedAttention
from espnet.nets.pytorch_backend.transformer.decoder_layer import DecoderLayer
from espnet.nets.pytorch_backend.transformer.dynamic_conv import DynamicConvolution
from espnet.nets.pytorch_backend.transformer.dynamic_conv2d import DynamicConvolution2D
from espnet.nets.pytorch_backend.transformer.embedding import PositionalEncoding
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm
from espnet.nets.pytorch_backend.transformer.lightconv import LightweightConvolution
from espnet.nets.pytorch_backend.transformer.lightconv2d import LightweightConvolution2D
from espnet.nets.pytorch_backend.transformer.mask import subsequent_mask
from espnet.nets.pytorch_backend.transformer.positionwise_feed_forward import (
PositionwiseFeedForward, # noqa: H301
)
from espnet.nets.pytorch_backend.transformer.repeat import repeat
from espnet.nets.scorer_interface import BatchScorerInterface
def _pre_hook(
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
# https://github.com/espnet/espnet/commit/3d422f6de8d4f03673b89e1caef698745ec749ea#diff-bffb1396f038b317b2b64dd96e6d3563
rename_state_dict(prefix + "output_norm.", prefix + "after_norm.", state_dict)
class Decoder(BatchScorerInterface, torch.nn.Module):
"""Transfomer decoder module.
Args:
odim (int): Output dimension.
self_attention_layer_type (str): Self-attention layer type.
attention_dim (int): Dimension of attention.
attention_heads (int): The number of heads of multi head attention.
conv_wshare (int): The number of kernels of convolution. Only used in
self_attention_layer_type == "lightconv*" or "dynamicconv*".
conv_kernel_length (Union[int, str]): Kernel size str of convolution
(e.g. 71_71_71_71_71_71). Only used in self_attention_layer_type
== "lightconv*" or "dynamicconv*".
conv_usebias (bool): Whether to use bias in convolution. Only used in
self_attention_layer_type == "lightconv*" or "dynamicconv*".
linear_units (int): The number of units of position-wise feed forward.
num_blocks (int): The number of decoder blocks.
dropout_rate (float): Dropout rate.
positional_dropout_rate (float): Dropout rate after adding positional encoding.
self_attention_dropout_rate (float): Dropout rate in self-attention.
src_attention_dropout_rate (float): Dropout rate in source-attention.
input_layer (Union[str, torch.nn.Module]): Input layer type.
use_output_layer (bool): Whether to use output layer.
pos_enc_class (torch.nn.Module): Positional encoding module class.
`PositionalEncoding` or `ScaledPositionalEncoding`
normalize_before (bool): Whether to use layer_norm before the first block.
concat_after (bool): Whether to concat attention layer's input and output.
if True, additional linear will be applied.
i.e. x -> x + linear(concat(x, att(x)))
if False, no additional linear will be applied. i.e. x -> x + att(x)
"""
def __init__(
self,
odim,
selfattention_layer_type="selfattn",
attention_dim=256,
attention_heads=4,
conv_wshare=4,
conv_kernel_length=11,
conv_usebias=False,
linear_units=2048,
num_blocks=6,
dropout_rate=0.1,
positional_dropout_rate=0.1,
self_attention_dropout_rate=0.0,
src_attention_dropout_rate=0.0,
input_layer="embed",
use_output_layer=True,
pos_enc_class=PositionalEncoding,
normalize_before=True,
concat_after=False,
):
"""Construct an Decoder object."""
torch.nn.Module.__init__(self)
self._register_load_state_dict_pre_hook(_pre_hook)
if input_layer == "embed":
self.embed = torch.nn.Sequential(
torch.nn.Embedding(odim, attention_dim),
pos_enc_class(attention_dim, positional_dropout_rate),
)
elif input_layer == "linear":
self.embed = torch.nn.Sequential(
torch.nn.Linear(odim, attention_dim),
torch.nn.LayerNorm(attention_dim),
torch.nn.Dropout(dropout_rate),
torch.nn.ReLU(),
pos_enc_class(attention_dim, positional_dropout_rate),
)
elif isinstance(input_layer, torch.nn.Module):
self.embed = torch.nn.Sequential(
input_layer, pos_enc_class(attention_dim, positional_dropout_rate)
)
else:
raise NotImplementedError("only `embed` or torch.nn.Module is supported.")
self.normalize_before = normalize_before
# self-attention module definition
if selfattention_layer_type == "selfattn":
logging.info("decoder self-attention layer type = self-attention")
decoder_selfattn_layer = MultiHeadedAttention
decoder_selfattn_layer_args = [
(
attention_heads,
attention_dim,
self_attention_dropout_rate,
)
] * num_blocks
elif selfattention_layer_type == "lightconv":
logging.info("decoder self-attention layer type = lightweight convolution")
decoder_selfattn_layer = LightweightConvolution
decoder_selfattn_layer_args = [
(
conv_wshare,
attention_dim,
self_attention_dropout_rate,
int(conv_kernel_length.split("_")[lnum]),
True,
conv_usebias,
)
for lnum in range(num_blocks)
]
elif selfattention_layer_type == "lightconv2d":
logging.info(
"decoder self-attention layer "
"type = lightweight convolution 2-dimentional"
)
decoder_selfattn_layer = LightweightConvolution2D
decoder_selfattn_layer_args = [
(
conv_wshare,
attention_dim,
self_attention_dropout_rate,
int(conv_kernel_length.split("_")[lnum]),
True,
conv_usebias,
)
for lnum in range(num_blocks)
]
elif selfattention_layer_type == "dynamicconv":
logging.info("decoder self-attention layer type = dynamic convolution")
decoder_selfattn_layer = DynamicConvolution
decoder_selfattn_layer_args = [
(
conv_wshare,
attention_dim,
self_attention_dropout_rate,
int(conv_kernel_length.split("_")[lnum]),
True,
conv_usebias,
)
for lnum in range(num_blocks)
]
elif selfattention_layer_type == "dynamicconv2d":
logging.info(
"decoder self-attention layer type = dynamic convolution 2-dimentional"
)
decoder_selfattn_layer = DynamicConvolution2D
decoder_selfattn_layer_args = [
(
conv_wshare,
attention_dim,
self_attention_dropout_rate,
int(conv_kernel_length.split("_")[lnum]),
True,
conv_usebias,
)
for lnum in range(num_blocks)
]
self.decoders = repeat(
num_blocks,
lambda lnum: DecoderLayer(
attention_dim,
decoder_selfattn_layer(*decoder_selfattn_layer_args[lnum]),
MultiHeadedAttention(
attention_heads, attention_dim, src_attention_dropout_rate
),
PositionwiseFeedForward(attention_dim, linear_units, dropout_rate),
dropout_rate,
normalize_before,
concat_after,
),
)
self.selfattention_layer_type = selfattention_layer_type
if self.normalize_before:
self.after_norm = LayerNorm(attention_dim)
if use_output_layer:
self.output_layer = torch.nn.Linear(attention_dim, odim)
else:
self.output_layer = None
def forward(self, tgt, tgt_mask, memory, memory_mask):
#print("in forward")
"""Forward decoder.
Args:
tgt (torch.Tensor): Input token ids, int64 (#batch, maxlen_out) if
input_layer == "embed". In the other case, input tensor
(#batch, maxlen_out, odim).
tgt_mask (torch.Tensor): Input token mask (#batch, maxlen_out).
dtype=torch.uint8 in PyTorch 1.2- and dtype=torch.bool in PyTorch 1.2+
(include 1.2).
memory (torch.Tensor): Encoded memory, float32 (#batch, maxlen_in, feat).
memory_mask (torch.Tensor): Encoded memory mask (#batch, maxlen_in).
dtype=torch.uint8 in PyTorch 1.2- and dtype=torch.bool in PyTorch 1.2+
(include 1.2).
Returns:
torch.Tensor: Decoded token score before softmax (#batch, maxlen_out, odim)
if use_output_layer is True. In the other case, final block outputs
(#batch, maxlen_out, attention_dim).
torch.Tensor: Score mask before softmax (#batch, maxlen_out).
"""
x = self.embed(tgt)
x, tgt_mask, memory, memory_mask = self.decoders(
x, tgt_mask, memory, memory_mask
)
if self.normalize_before:
x = self.after_norm(x)
if self.output_layer is not None:
x = self.output_layer(x)
#logging.info( "Shape x{}".format(x.shape))
#logging.info( "Shape mask {}".format(tgt_mask.shape))
return x, tgt_mask
def forward_one_step(self, tgt, tgt_mask, memory, cache=None):
"""Forward one step.
Args:
tgt (torch.Tensor): Input token ids, int64 (#batch, maxlen_out).
tgt_mask (torch.Tensor): Input token mask (#batch, maxlen_out).
dtype=torch.uint8 in PyTorch 1.2- and dtype=torch.bool in PyTorch 1.2+
(include 1.2).
memory (torch.Tensor): Encoded memory, float32 (#batch, maxlen_in, feat).
cache (List[torch.Tensor]): List of cached tensors.
Each tensor shape should be (#batch, maxlen_out - 1, size).
Returns:
torch.Tensor: Output tensor (batch, maxlen_out, odim).
List[torch.Tensor]: List of cache tensors of each decoder layer.
"""
#print("in forward")
x = self.embed(tgt)
if cache is None:
cache = [None] * len(self.decoders)
new_cache = []
for c, decoder in zip(cache, self.decoders):
x, tgt_mask, memory, memory_mask = decoder(
x, tgt_mask, memory, None, cache=c
)
new_cache.append(x)
if self.normalize_before:
y = self.after_norm(x[:, -1])
else:
y = x[:, -1]
if self.output_layer is not None:
y = torch.log_softmax(self.output_layer(y), dim=-1)
#logging.info( "Shape x{}".format(x.shape))
#logging.info( "Shape mask {}".format(tgt_mask.shape))
return y, new_cache
# beam search API (see ScorerInterface)
def score(self, ys, state, x):
"""Score."""
ys_mask = subsequent_mask(len(ys), device=x.device).unsqueeze(0)
if self.selfattention_layer_type != "selfattn":
# TODO(karita): implement cache
logging.warning(
f"{self.selfattention_layer_type} does not support cached decoding."
)
state = None
logp, state = self.forward_one_step(
ys.unsqueeze(0), ys_mask, x.unsqueeze(0), cache=state
)
return logp.squeeze(0), state
# batch beam search API (see BatchScorerInterface)
def batch_score(
self, ys: torch.Tensor, states: List[Any], xs: torch.Tensor
) -> Tuple[torch.Tensor, List[Any]]:
"""Score new token batch (required).
Args:
ys (torch.Tensor): torch.int64 prefix tokens (n_batch, ylen).
states (List[Any]): Scorer states for prefix tokens.
xs (torch.Tensor):
The encoder feature that generates ys (n_batch, xlen, n_feat).
Returns:
tuple[torch.Tensor, List[Any]]: Tuple of
batchfied scores for next token with shape of `(n_batch, n_vocab)`
and next state list for ys.
"""
# merge states
n_batch = len(ys)
n_layers = len(self.decoders)
if states[0] is None:
batch_state = None
else:
# transpose state of [batch, layer] into [layer, batch]
batch_state = [
torch.stack([states[b][i] for b in range(n_batch)])
for i in range(n_layers)
]
# batch decoding
ys_mask = subsequent_mask(ys.size(-1), device=xs.device).unsqueeze(0)
logp, states = self.forward_one_step(ys, ys_mask, xs, cache=batch_state)
# transpose state of [layer, batch] into [batch, layer]
state_list = [[states[i][b] for i in range(n_layers)] for b in range(n_batch)]
return logp, state_list
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.LayerNorm",
"torch.nn.Module.__init__",
"torch.nn.ReLU",
"torch.nn.Embedding"
] | 1.1.0 | drumilT/espnet | 944de63aa9ede23cefca7b3d092150ea52e7f1b2 |
1.3 | import numpy as np
import scipy.signal
from gym.spaces import Box, Discrete
import torch
import torch.nn as nn
from torch.distributions.normal import Normal
from torch.distributions.categorical import Categorical
def combined_shape(length, shape=None):
if shape is None:
return (length,)
return (length, shape) if np.isscalar(shape) else (length, *shape)
def count_vars(module):
return sum([np.prod(p.shape) for p in module.parameters()])
def discount_cumsum(x, discount):
"""
magic from rllab for computing discounted cumulative sums of vectors.
input:
vector x,
[x0,
x1,
x2]
output:
[x0 + discount * x1 + discount^2 * x2,
x1 + discount * x2,
x2]
"""
return scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1]
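# Quick sanity check of the lfilter trick above (illustrative):
#   discount_cumsum(np.array([1., 1., 1.]), 0.5)
#   -> array([1.75, 1.5 , 1. ])   # i.e. [1 + 0.5*1 + 0.25*1, 1 + 0.5*1, 1]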
class Actor(nn.Module):
def _distribution(self, obs):
raise NotImplementedError
def _log_prob_from_distribution(self, pi, act):
raise NotImplementedError
def forward(self, obs, act=None):
# Produce action distributions for given observations, and
# optionally compute the log likelihood of given actions under
# those distributions.
pi = self._distribution(obs)
logp_a = None
if act is not None:
logp_a = self._log_prob_from_distribution(pi, act)
return pi, logp_a
class CategoricalActor(Actor):
def __init__(self, obs_dim, act_dim, model, model_kwargs):
super().__init__()
model_kwargs["input_sizes"] = [obs_dim] + model_kwargs["input_sizes"]
model_kwargs["output_sizes"] += [act_dim]
self.logits_net = model(**model_kwargs)
def _distribution(self, obs):
logits = self.logits_net(obs)
return Categorical(logits=logits)
def _log_prob_from_distribution(self, pi, act):
return pi.log_prob(act)
class GaussianActor(Actor):
def __init__(self, obs_dim, act_dim, model, model_kwargs):
super().__init__()
log_std = -0.5 * np.ones(act_dim, dtype=np.float32)
self.log_std = torch.nn.Parameter(torch.as_tensor(log_std))
model_kwargs["input_sizes"] = [obs_dim] + model_kwargs["input_sizes"]
model_kwargs["output_sizes"] += [act_dim]
self.mu_net = model(**model_kwargs)
def _distribution(self, obs):
mu = self.mu_net(obs)
std = torch.exp(self.log_std)
return Normal(mu, std)
def _log_prob_from_distribution(self, pi, act):
return pi.log_prob(act).sum(axis=-1) # Last axis sum needed for Torch Normal distribution
class Critic(nn.Module):
def __init__(self, obs_dim, model, model_kwargs):
super().__init__()
model_kwargs["input_sizes"] = [obs_dim] + model_kwargs["input_sizes"]
model_kwargs["output_sizes"] += [1]
self.v_net = model(**model_kwargs)
def forward(self, obs):
return torch.squeeze(self.v_net(obs), -1) # Critical to ensure v has right shape.
class ActorCritic(nn.Module):
def __init__(self, observation_space, action_space, model, model_kwargs_getter):
super().__init__()
obs_dim = observation_space.shape[0]
# policy builder depends on action space
if isinstance(action_space, Box):
self.pi = GaussianActor(obs_dim, action_space.shape[0], model, model_kwargs_getter())
elif isinstance(action_space, Discrete):
self.pi = CategoricalActor(obs_dim, action_space.n, model, model_kwargs_getter())
# build value function
self.v = Critic(obs_dim, model, model_kwargs_getter())
def step(self, obs):
with torch.no_grad():
pi = self.pi._distribution(obs)
a = pi.sample()
logp_a = self.pi._log_prob_from_distribution(pi, a)
v = self.v(obs)
return a.cpu().numpy(), v.cpu().numpy(), logp_a.cpu().numpy()
def act(self, obs):
return self.step(obs)[0] | [
"torch.no_grad",
"torch.distributions.normal.Normal",
"torch.distributions.categorical.Categorical",
"torch.as_tensor",
"torch.exp"
] | 1.3.1 | florianHoidn/spinningup | 419ca30849aae11ac53b9421094d2212d4cad652 |
1.5 | # Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from parameterized import parameterized
from monai.networks.nets import HighResNet
from tests.utils import test_script_save
device = "cuda" if torch.cuda.is_available() else "cpu"
TEST_CASE_1 = [ # single channel 3D, batch 16
{"spatial_dims": 3, "in_channels": 1, "out_channels": 3, "norm_type": "instance"},
(16, 1, 32, 24, 48),
(16, 3, 32, 24, 48),
]
TEST_CASE_2 = [ # 4-channel 3D, batch 1
{"spatial_dims": 3, "in_channels": 4, "out_channels": 3, "acti_type": "relu6"},
(1, 4, 17, 64, 48),
(1, 3, 17, 64, 48),
]
TEST_CASE_3 = [ # 4-channel 2D, batch 7
{"spatial_dims": 2, "in_channels": 4, "out_channels": 3},
(7, 4, 64, 48),
(7, 3, 64, 48),
]
TEST_CASE_4 = [ # 4-channel 1D, batch 16
{"spatial_dims": 1, "in_channels": 4, "out_channels": 3, "dropout_prob": 0.1},
(16, 4, 63),
(16, 3, 63),
]
class TestHighResNet(unittest.TestCase):
@parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4])
def test_shape(self, input_param, input_shape, expected_shape):
net = HighResNet(**input_param).to(device)
net.eval()
with torch.no_grad():
result = net.forward(torch.randn(input_shape).to(device))
self.assertEqual(result.shape, expected_shape)
def test_script(self):
input_param, input_shape, expected_shape = TEST_CASE_1
net = HighResNet(**input_param)
test_data = torch.randn(input_shape)
out_orig, out_reloaded = test_script_save(net, test_data)
assert torch.allclose(out_orig, out_reloaded)
if __name__ == "__main__":
unittest.main()
| [
"torch.allclose",
"torch.no_grad",
"torch.cuda.is_available",
"torch.randn"
] | 1.5 | LucasFidon/MONAI | a7ef9d567775dd7a222f93bab08191c0e3532c92 |
1.0 | """ Config class for search/augment """
import argparse
import os
import genotypes as gt
from functools import partial
import torch
def get_parser(name):
""" make default formatted parser """
parser = argparse.ArgumentParser(name, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# print default value always
parser.add_argument = partial(parser.add_argument, help=' ')
return parser
def parse_gpus(gpus):
if gpus == 'all':
return list(range(torch.cuda.device_count()))
else:
return [int(s) for s in gpus.split(',')]
class BaseConfig(argparse.Namespace):
def print_params(self, prtf=print):
prtf("")
prtf("Parameters:")
for attr, value in sorted(vars(self).items()):
prtf("{}={}".format(attr.upper(), value))
prtf("")
def as_markdown(self):
""" Return configs as markdown format """
text = "|name|value| \n|-|-| \n"
for attr, value in sorted(vars(self).items()):
text += "|{}|{}| \n".format(attr, value)
return text
class SearchConfig(BaseConfig):
def build_parser(self):
parser = get_parser("Search config")
parser.add_argument('--name', required=True)
parser.add_argument('--dataset', required=True, help='CIFAR10 / MNIST / FashionMNIST')
parser.add_argument('--batch_size', type=int, default=64, help='batch size')
parser.add_argument('--w_lr', type=float, default=0.025, help='lr for weights')
parser.add_argument('--w_lr_min', type=float, default=0.001, help='minimum lr for weights')
parser.add_argument('--w_momentum', type=float, default=0.9, help='momentum for weights')
parser.add_argument('--w_weight_decay', type=float, default=3e-4,
help='weight decay for weights')
parser.add_argument('--w_grad_clip', type=float, default=5.,
help='gradient clipping for weights')
parser.add_argument('--print_freq', type=int, default=50, help='print frequency')
parser.add_argument('--gpus', default='0', help='gpu device ids separated by comma. '
'`all` indicates use all gpus.')
parser.add_argument('--epochs', type=int, default=50, help='# of training epochs')
parser.add_argument('--init_channels', type=int, default=16)
parser.add_argument('--layers', type=int, default=8, help='# of layers')
parser.add_argument('--seed', type=int, default=2, help='random seed')
parser.add_argument('--workers', type=int, default=4, help='# of workers')
parser.add_argument('--alpha_lr', type=float, default=3e-4, help='lr for alpha')
parser.add_argument('--alpha_weight_decay', type=float, default=1e-3,
help='weight decay for alpha')
parser.add_argument('--n_nodes', type=int, default=4, help='nodes count in searched cell (without input)')
parser.add_argument('--ops_set', type=int, default=1, help='operations set')
parser.add_argument('--hv_type', type=int, default=1, help='')
parser.add_argument('--multi_avg_rank', type=int, default=0, help='')
parser.add_argument('--multi_avg_size', type=int, default=0, help='')
parser.add_argument('--proxyless', action = "store_true", help='')
parser.add_argument('--sbp', action = "store_true", help='')
parser.add_argument('--az', action = "store_true", help='')
parser.add_argument('--smart_sample', action = "store_true", help='')
return parser
def __init__(self):
parser = self.build_parser()
args = parser.parse_args()
super().__init__(**vars(args))
self.data_path = './data/'
self.path = os.path.join('searchs', self.name)
self.plot_path = os.path.join(self.path, 'plots')
self.gpus = parse_gpus(self.gpus)
class AugmentConfig(BaseConfig):
def build_parser(self):
parser = get_parser("Augment config")
parser.add_argument('--name', required=True)
parser.add_argument('--dataset', required=True, help='CIFAR10 / MNIST / FashionMNIST')
parser.add_argument('--batch_size', type=int, default=96, help='batch size')
parser.add_argument('--lr', type=float, default=0.025, help='lr for weights')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-4, help='weight decay')
parser.add_argument('--grad_clip', type=float, default=5.,
help='gradient clipping for weights')
parser.add_argument('--print_freq', type=int, default=200, help='print frequency')
parser.add_argument('--gpus', default='0', help='gpu device ids separated by comma. '
'`all` indicates use all gpus.')
parser.add_argument('--epochs', type=int, default=600, help='# of training epochs')
parser.add_argument('--init_channels', type=int, default=36)
parser.add_argument('--layers', type=int, default=20, help='# of layers')
parser.add_argument('--seed', type=int, default=2, help='random seed')
parser.add_argument('--workers', type=int, default=4, help='# of workers')
parser.add_argument('--aux_weight', type=float, default=0.4, help='auxiliary loss weight')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--drop_path_prob', type=float, default=0.2, help='drop path prob')
parser.add_argument('--genotype', required=True, help='Cell genotype')
parser.add_argument('--ops_set', type=int, default=1, help='operations set')
return parser
def __init__(self):
parser = self.build_parser()
args = parser.parse_args()
super().__init__(**vars(args))
self.data_path = './data/'
self.path = os.path.join('augments', self.name)
self.genotype = gt.from_str(self.genotype)
self.gpus = parse_gpus(self.gpus)
| [
"torch.cuda.device_count"
] | 1.0.0 | IlyaTrofimov/pt.darts | 7cda57ad6b0e5802f852c3908619ffa066b277a7 |
1.7 | from typing import Generator, List, Union, Any
import torch
from torch import nn, optim, Tensor
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchtext.data import BucketIterator
from tqdm import tqdm
from src.config import Config
from src.early_stopping import EarlyStopping
class TrainingLoop:
def __init__(self, config: Config, model: nn.Module, optimizer: optim.Optimizer,
train_iterator: DataLoader, val_iterator: DataLoader):
self.config = config
self.model = model
self.train_iterator = train_iterator
self.val_iterator = val_iterator
self.optimizer = optimizer
# Learning rate scheduler
self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, self.config.learning_rate_step_size,
gamma=self.config.learning_rate_decay)
# Initialize loss function
self.train_losses = []
self.val_losses = []
self.test_losses = []
self.num_epochs = self.config.num_epochs
# Initialize early stopping criteria
self.early_stopping = EarlyStopping(
model_path=self.config.model_best_checkpoint_path,
patience=self.config.patience
)
# update learning rate
print(f"Learning rate: {self.scheduler.get_lr()[0]}")
self.scheduler.step()
# initialize tensorboard writer
self.writer = SummaryWriter(self.config.model_checkpoint_dir)
def save_latest_model_checkpoint(self, epoch: int):
torch.save({
'epoch': epoch,
'model_state_dict': self.model.state_dict(),
'optimizer_state_dict': self.optimizer.state_dict(),
'train_loss': self.train_losses[-1],
'val_loss': self.val_losses[-1],
}, self.config.model_latest_checkpoint_path)
def iterate_epoch(self) -> Generator[int, None, None]:
for epoch in range(self.num_epochs):
print(f'Starting epoch {epoch}')
# Iterate over the epochs and train model here
yield epoch
print(f'Train loss: {self.train_losses[-1]}')
print(f'Val loss: {self.val_losses[-1]}')
self.early_stopping.check_early_stopping_criteria(self.model, self.optimizer, epoch,
train_loss=self.train_losses[-1],
val_loss=self.val_losses[-1])
if self.early_stopping.early_stop:
# End epoch iteration if we meet the early stopping criteria
print(f"Triggered early stopping criteria. Stopping after the {epoch}th iteration")
break
# Save latest model (not best)
self.save_latest_model_checkpoint(epoch)
def compute_loss_from_batch(self, batch: Any) -> Tensor:
raise NotImplementedError("Should be implemented by parent class.")
def iterate_train(self, epoch: int) -> None:
epoch_train_loss = 0
self.model.train()
for batch in tqdm(self.train_iterator):
# batch is of type torchtext.data.batch.Batch
# batch has dimension X x N where X is the max sequence length and N is the batch_size
# the values in batch are the indices of the word in data_manager.source_vocab.itos
# Before passing in a new instance, you need to zero out the gradients from the old instance
self.model.zero_grad()
loss = self.compute_loss_from_batch(batch)
# Do the backward pass and update the gradient
loss.backward()
self.optimizer.step()
# Update the total loss for the current epoch
epoch_train_loss += loss.item()
self.writer.add_scalar('Loss/train', epoch_train_loss, epoch)
self.writer.add_scalar('Avg Loss/train', epoch_train_loss / len(self.train_iterator), epoch)
# Record this epoch's total loss
self.train_losses.append(epoch_train_loss)
def compute_loss(self, data_iterator: Union[BucketIterator, DataLoader],
losses: List[int]) -> float:
self.model.eval()
epoch_loss = 0
for batch in tqdm(data_iterator):
# Do a forward pass and compute the loss
loss = self.compute_loss_from_batch(batch)
epoch_loss += loss.item()
losses.append(epoch_loss)
return epoch_loss
def compute_validation_loss(self, epoch: int) -> None:
loss = self.compute_loss(self.val_iterator, self.val_losses)
self.writer.add_scalar('Loss/val', loss, epoch)
self.writer.add_scalar('Avg Loss/val', loss / len(self.val_iterator), epoch)
def train(self):
for epoch in self.iterate_epoch():
# Forward and backward pass on training data
self.iterate_train(epoch)
# Forward pass on validation data
self.compute_validation_loss(epoch)
| [
"torch.optim.lr_scheduler.StepLR",
"torch.utils.tensorboard.SummaryWriter"
] | 1.7.1 | BushMinusZero/deep-learning-skunk-works | 9178455c460940adbe6943e2b657c994da4af231 |
1.6 | import torch
from torch import nn
from einops import rearrange, repeat
##################################
# Linformer
##################################
def get_EF(input_size, dim, method="learnable", head_dim=None, bias=True):
"""
Returns the E or F matrix, initialized via Xavier initialization.
This is the recommended way to do it according to the authors of the paper.
Includes a method for convolution, as well as a method for no additional params.
"""
assert method == "learnable" or method == "convolution" or method == "no_params", "The method flag needs to be either 'learnable', 'convolution', or 'no_params'!"
if method == "convolution":
conv = nn.Conv1d(head_dim, head_dim, kernel_size=int(input_size/dim), stride=int(input_size/dim))
return conv
if method == "no_params":
mat = torch.zeros((input_size, dim))
torch.nn.init.normal_(mat, mean=0.0, std=1/dim)
return mat
lin = nn.Linear(input_size, dim, bias)
torch.nn.init.xavier_normal_(lin.weight)
return lin
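# Usage sketch (shapes below are illustrative assumptions, not taken from a
# caller in this file): project the sequence axis of K from input_size down to dim_k.
#   E = get_EF(input_size=256, dim=20, method="learnable")   # nn.Linear(256, 20)
#   k = torch.randn(2, 8, 256, 64)                           # (batch, heads, seq, head_dim)
#   k = torch.einsum('...ij->...ji', k)                      # (2, 8, 64, 256)
#   k = E(k)                                                  # (2, 8, 64, 20)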
class linformerAttention(nn.Module):
def __init__(
self,
dim,
dropout,
input_size,
dim_k = 20, # projected dimension k that K and V are compressed to along the sequence axis
full_attention = False, # if False, use the Linformer low-rank projection; if True, plain full attention
parameter_sharing = None, # The `parameter_sharing` flag has to be either 'none', 'headwise', 'kv', or 'layerwise'."
):
super().__init__()
self.dim = dim
self.dropout = nn.Dropout(dropout)
self.dim_k = dim_k
self.full_attention = full_attention
self.input_size = input_size
self.print_dim = False
self.E = get_EF(input_size, dim = self.dim_k, method = "learnable", head_dim = self.dim)
self.F = get_EF(input_size, dim = self.dim_k, method = "learnable", head_dim = self.dim) if parameter_sharing == "none" or parameter_sharing == "headwise" else self.E
self.is_proj_tensor = isinstance(self.E, torch.Tensor)
def forward(self, q, k, v):
if self.print_dim:
print("matmul(k, e)")
print("k:"+str(k.shape))
print("E:"+str(self.input_size)+", "+str(self.dim_k))
if not self.full_attention:
if self.is_proj_tensor:
# with the hardcoded "learnable" method, E is an nn.Module, so this branch is not taken
self.E = self.E.to(k.device)
#k = torch.matmul(k, self.E)
b, h, *_ = q.shape
projection_E = repeat(self.E, 'j d -> b h j d', b = b, h = h)
k = torch.einsum('...di,...dj->...ij', k, projection_E)
else:
k = torch.einsum('...ij->...ji', k)
k = self.E(k)
if self.print_dim:
print("matmul(q, k)")
print("q:"+str(q.shape))
print("K:"+str(k.shape))
q = torch.einsum('...id,...dj->...ij', q, k)
P_bar = q/torch.sqrt(torch.tensor(self.dim_k).type(q.type())).to(q.device)
P_bar = P_bar.softmax(dim=-1)
P_bar = self.dropout(P_bar)
if not self.full_attention:
if self.is_proj_tensor:
# NOTE: this tensor-projection branch has a shape mismatch; with the hardcoded "learnable" method it is never taken
self.F = self.F.to(v.device)
v = torch.matmul(v, self.F)
else:
v = torch.einsum('...ij->...ji', v)
v = self.F(v)
out = torch.einsum('...id,...jd->...ij', P_bar, v)
return out
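# Usage sketch (hedged: all sizes are illustrative; q, k, v are per-head
# tensors of shape (batch, heads, input_size, dim)):
#   attn = linformerAttention(dim=64, dropout=0.1, input_size=256, dim_k=20)
#   q = k = v = torch.randn(2, 8, 256, 64)
#   out = attn(q, k, v)   # (2, 8, 256, 64); K/V were compressed to length 20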
| [
"torch.nn.Linear",
"torch.zeros",
"torch.nn.Dropout",
"torch.einsum",
"torch.nn.init.normal_",
"torch.tensor",
"torch.nn.init.xavier_normal_",
"torch.matmul"
] | 1.6 | MohammadrezaRezvani/performer-pytorch | 347dd58111f4f79b8991f7609552203609856b4b |
1.8 | import numpy as np
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false" # disable warning when using smart batching
import pandas as pd
from torch.utils.data import Dataset
from sklearn.model_selection import StratifiedKFold
from torch.utils.data import Dataset
from pytorch_lightning.callbacks import ModelCheckpoint
import pytorch_lightning as pl
import torch
from transformers import AutoTokenizer
import yaml
from argparse import ArgumentParser
from config import CFG
def prepare_args():
parser = ArgumentParser()
parser.add_argument(
"--config",
action="store",
dest="config",
help="Configuration scheme",
default=None,
)
args = parser.parse_args()
print(f'[INFO] Using configuration for {args.config}')
with open(CFG.finetune_config_path) as f:
cfg = yaml.load(f, Loader=yaml.FullLoader)
for k, v in cfg[args.config].items():
setattr(CFG, k, v)
# reference: https://www.kaggle.com/abhishek/step-1-create-folds
def create_folds(df, num_splits, random_seed):
# we create a new column called kfold and fill it with -1
df["kfold"] = -1
# calculate number of bins by Sturge's rule
# I take the floor of the value, you can also
# just round it
num_bins = int(np.floor(1 + np.log2(len(df))))
# Bin values into discrete intervals.
df.loc[:, "bins"] = pd.cut(
df["target"], bins=num_bins, labels=False
)
# initiate the kfold class from model_selection module
kf = StratifiedKFold(n_splits=num_splits, shuffle=True, random_state=random_seed)
# fill the new kfold column
# note that, instead of targets, we use bins!
for f, (t_, v_) in enumerate(kf.split(X=df, y=df.bins.values)):
df.loc[v_, 'kfold'] = f
# drop the bins column
# df = df.drop("bins", axis=1)
# return dataframe with folds
return df
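# Example of the binning above (pure arithmetic): with 1000 training rows,
# num_bins = floor(1 + log2(1000)) = floor(1 + 9.97) = 10, so the continuous
# target is cut into 10 bins and the folds are stratified on those bins.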
def get_tokenizer(model_name_or_path):
try:
print('[INFO] Using cached tokenizer...')
return AutoTokenizer.from_pretrained(model_name_or_path, local_files_only=True)
except:
print('[INFO] Downloading tokenizer...')
return AutoTokenizer.from_pretrained(model_name_or_path)
class CommonLitDataset(Dataset):
def __init__(self, df, tokenizer, shuffle=False):
self.df = df
if shuffle:
self.df = self.df.sample(frac=1, random_state=CFG.train_seed).reset_index(drop=True)
self.labeled = 'target' in df.columns
self.tokenizer = tokenizer
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
item = self.df.iloc[idx]
text = item['excerpt']
token = self.tokenizer(text, return_tensors='pt', truncation=True, padding='max_length', max_length=CFG.max_len)
if self.labeled:
target = item['target']
target = torch.tensor(target, dtype=torch.float)
return token['input_ids'].squeeze(), token['attention_mask'].squeeze(), target
else:
return token['input_ids'].squeeze(), token['attention_mask'].squeeze()
def log_message(msg, exp_id):
dir_path = [CFG.output_dir, CFG.params_dir] if CFG.env == 'colab' else [CFG.output_dir]
for path in dir_path:
log_file = os.path.join(path, f'exp_{str(exp_id).zfill(3)}', f'{CFG.model_name}.txt')
with open(log_file, 'a') as f:
f.write(msg + '\n')
if CFG.env == 'local':
'''
Custom checkpoint class wrappers.
'''
from typing import Any, Optional, Union
from pathlib import Path
from pytorch_lightning.utilities import rank_zero_deprecation # import error in kaggle
from pytorch_lightning.utilities.types import STEP_OUTPUT
class CustomModelCheckpointDelayedEval(ModelCheckpoint):
def __init__(
self,
dirpath: Optional[Union[str, Path]] = None,
filename: Optional[str] = None,
monitor: Optional[str] = None,
verbose: bool = False,
save_last: Optional[bool] = None,
save_top_k: Optional[int] = None,
save_weights_only: bool = False,
mode: str = "min",
auto_insert_metric_name: bool = True,
every_n_train_steps: Optional[int] = None,
every_n_val_epochs: Optional[int] = None,
period: Optional[int] = None,
train_steps = 0
):
super().__init__(dirpath=dirpath, filename=filename, monitor=monitor, verbose=verbose, save_last=save_last, save_top_k=save_top_k, save_weights_only=save_weights_only, mode=mode, auto_insert_metric_name=auto_insert_metric_name, every_n_train_steps=every_n_train_steps, every_n_val_epochs=every_n_val_epochs, period=period)
# self.eval_schedule = [(0.50, 16), (0.49, 8), (0.48, 4), (0.47, 2), (-1., 1)]
self.eval_interval = CFG.delayed_val_check_interval
self.last_eval_step = 0
# make sure the result is consistent with different `delayed_val_check_ep`
self.delayed_steps = (int(CFG.delayed_val_check_ep * train_steps) // self.eval_interval) * self.eval_interval
print(f'[INFO] Delayed steps before evaluation: {self.delayed_steps}')
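# e.g. (illustrative numbers) with train_steps=1000, CFG.delayed_val_check_ep=0.5
# and CFG.delayed_val_check_interval=40: (int(0.5 * 1000) // 40) * 40 = 480, so
# evaluation is enabled at step 480 and then runs every 40 steps from step 520 onward.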
self.val_check_mode = False
def on_train_batch_end(
self,
trainer: 'pl.Trainer',
pl_module: 'pl.LightningModule',
outputs: STEP_OUTPUT,
batch: Any,
batch_idx: int,
dataloader_idx: int,
) -> None:
""" Save checkpoint on train batch end if we meet the criteria for `every_n_train_steps` """
if self._should_skip_saving_checkpoint(trainer):
return
step = trainer.global_step
if step == self.delayed_steps:
self.val_check_mode = True
self.last_eval_step = step
print('[INFO] The val check mode is turned on!')
if self.val_check_mode and step == self.last_eval_step + self.eval_interval:
self.last_eval_step = step
trainer.run_evaluation()
| [
"torch.tensor"
] | 1.8.1 | ofnt/Kaggle-CommonLit-Readability-6th-Place-Solution | 61f5df662d8b5dfb3f80734e4444d52fe0f478cd |
1.4 | import torch
from torchaudio_unittest.common_utils import PytorchTestCase
from torchaudio_unittest.prototype.emformer_test_impl import EmformerTestImpl
class EmformerFloat32CPUTest(EmformerTestImpl, PytorchTestCase):
dtype = torch.float32
device = torch.device("cpu")
class EmformerFloat64CPUTest(EmformerTestImpl, PytorchTestCase):
dtype = torch.float64
device = torch.device("cpu")
| [
"torch.device"
] | 1.4.0 | popcornell/audio | 7b6b2d000023e2aa3365b769866c5f375e0d5fda |
1.4 | import json
import torch
from parameterized import parameterized
from torchaudio.models.wav2vec2 import (
wav2vec2_base,
wav2vec2_large,
wav2vec2_large_lv60k,
)
from torchaudio.models.wav2vec2.utils import import_huggingface_model
from torchaudio_unittest.common_utils import (
get_asset_path,
skipIfNoModule,
TorchaudioTestCase,
)
def _load_config(*paths):
with open(f'{get_asset_path("wav2vec2", "huggingface", *paths)}.json', "r") as file_:
return json.load(file_)
def _name_func(testcase_func, i, param):
return f"{testcase_func.__name__}_{i}_{param[0][1].__name__}"
# Pretrained
HF_BASE = _load_config("wav2vec2-base")
HF_LARGE = _load_config("wav2vec2-large")
HF_LARGE_LV60 = _load_config("wav2vec2-large-lv60")
HF_LARGE_XLSR_53 = _load_config("wav2vec2-large-xlsr-53")
HF_BASE_10K_VOXPOPULI = _load_config("wav2vec2-base-10k-voxpopuli")
# Finetuned
HF_BASE_960H = _load_config("wav2vec2-base-960h")
HF_LARGE_960H = _load_config("wav2vec2-large-960h")
HF_LARGE_LV60_960H = _load_config("wav2vec2-large-960h-lv60")
HF_LARGE_LV60_SELF_960H = _load_config("wav2vec2-large-960h-lv60-self")
HF_LARGE_XLSR_DE = _load_config("wav2vec2-large-xlsr-53-german")
# Config and corresponding factory functions
PRETRAIN_CONFIGS = parameterized.expand(
[
(HF_BASE, wav2vec2_base),
(HF_LARGE, wav2vec2_large),
(HF_LARGE_LV60, wav2vec2_large_lv60k),
(HF_LARGE_XLSR_53, wav2vec2_large_lv60k),
(HF_BASE_10K_VOXPOPULI, wav2vec2_base),
],
name_func=_name_func,
)
FINETUNE_CONFIGS = parameterized.expand(
[
(HF_BASE_960H, wav2vec2_base),
(HF_LARGE_960H, wav2vec2_large),
(HF_LARGE_LV60_960H, wav2vec2_large_lv60k),
(HF_LARGE_LV60_SELF_960H, wav2vec2_large_lv60k),
(HF_LARGE_XLSR_DE, wav2vec2_large_lv60k),
],
name_func=_name_func,
)
@skipIfNoModule("transformers")
class TestHFIntegration(TorchaudioTestCase):
"""Test the process of importing the models from Hugging Face Transformers
Test methods in this test suite check the following things
1. Models loaded with Hugging Face Transformers can be imported.
2. The same model can be recreated without Hugging Face Transformers.
"""
def _get_model(self, config):
# Helper function to avoid importing transformers on module scope.
# Normally, we use `is_module_available` helper function to check if
# the library is available, and import it on module scope if available.
# However, somehow, once "transformers" is imported, `is_module_available`
# starts to fail. Therefore, we defer importing "transformers" until
# the actual tests are started.
from transformers.models.wav2vec2 import (
Wav2Vec2Config,
Wav2Vec2Model,
Wav2Vec2ForCTC,
)
if config["architectures"] == ["Wav2Vec2Model"]:
return Wav2Vec2Model(Wav2Vec2Config(**config))
if config["architectures"] == ["Wav2Vec2ForCTC"]:
return Wav2Vec2ForCTC(Wav2Vec2Config(**config))
raise ValueError(f'Unexpected arch: {config["architectures"]}')
def _test_import_pretrain(self, original, imported, config):
torch.manual_seed(0)
# FeatureExtractor
x = torch.randn(3, 1024)
ref = original.feature_extractor(x).transpose(1, 2)
hyp, _ = imported.feature_extractor(x, None)
self.assertEqual(ref, hyp)
# Feature projection
x = torch.randn(3, 10, config["conv_dim"][-1])
ref = original.feature_projection(x)[0]
hyp = imported.encoder.feature_projection(x)
self.assertEqual(ref, hyp)
# Convolutional Positional Encoder
x = torch.randn(3, 256, config["hidden_size"])
ref = original.encoder.pos_conv_embed(x)
hyp = imported.encoder.transformer.pos_conv_embed(x)
self.assertEqual(ref, hyp)
# Encoder Transformer Layer
for original_, imported_ in zip(original.encoder.layers, imported.encoder.transformer.layers):
b, l, e = 16, 3, config["hidden_size"]
x = torch.randn(b, l, e)
mask = torch.randn(b, 1, l, l)
(ref,) = original_(x, attention_mask=mask, output_attentions=False)
hyp = imported_(x, mask)
self.assertEqual(ref, hyp)
# The whole Encoder Transformer
b, l, e = 16, 3, config["hidden_size"]
x = torch.randn(b, l, e)
ref = original.encoder(x).last_hidden_state
hyp = imported.encoder.transformer(x)
self.assertEqual(ref, hyp)
def _test_import_finetune(self, original, imported, config):
# Aux
x = torch.randn(3, 10, config["hidden_size"])
ref = original.lm_head(x)
hyp = imported.aux(x)
self.assertEqual(ref, hyp)
# The whole model without mask
x = torch.randn(3, 1024)
ref = original(x).logits
hyp, _ = imported(x)
self.assertEqual(ref, hyp)
# The whole model without mask
batch_size, num_frames = 3, 1024
x = torch.randn(batch_size, num_frames)
ref = original(x).logits
hyp, _ = imported(x)
self.assertEqual(ref, hyp)
# The whole model with mask
batch_size, num_frames = 3, 1024
x = torch.randn(batch_size, num_frames)
lengths = torch.randint(
low=0,
high=num_frames,
size=[
batch_size,
],
)
mask = torch.arange(num_frames).expand(batch_size, num_frames) < lengths[:, None]
ref = original(x, attention_mask=mask).logits
hyp, output_lengths = imported(x, lengths)
for i, l in enumerate(output_lengths):
self.assertEqual(ref[i, :l, ...], hyp[i, :l, ...])
@PRETRAIN_CONFIGS
def test_import_pretrain(self, config, _):
"""wav2vec2 models from HF transformers can be imported and yields the same results"""
original = self._get_model(config).eval()
imported = import_huggingface_model(original).eval()
self._test_import_pretrain(original, imported, config)
@FINETUNE_CONFIGS
def test_import_finetune(self, config, _):
"""wav2vec2 models from HF transformers can be imported and yields the same results"""
original = self._get_model(config).eval()
imported = import_huggingface_model(original).eval()
self._test_import_pretrain(original.wav2vec2, imported, config)
self._test_import_finetune(original, imported, config)
def _test_recreate(self, imported, reloaded, config):
torch.manual_seed(0)
# FeatureExtractor
x = torch.randn(3, 1024)
ref, _ = imported.feature_extractor(x, None)
hyp, _ = reloaded.feature_extractor(x, None)
self.assertEqual(ref, hyp)
# Feature projection
x = torch.randn(3, 10, config["conv_dim"][-1])
ref = imported.encoder.feature_projection(x)
hyp = reloaded.encoder.feature_projection(x)
self.assertEqual(ref, hyp)
# Convolutional Positional Encoder
x = torch.randn(3, 256, config["hidden_size"])
ref = imported.encoder.transformer.pos_conv_embed(x)
hyp = reloaded.encoder.transformer.pos_conv_embed(x)
self.assertEqual(ref, hyp)
# Encoder Transformer Layer
for imported_, reloaded_ in zip(imported.encoder.transformer.layers, reloaded.encoder.transformer.layers):
b, l, e = 16, 3, config["hidden_size"]
x = torch.randn(b, l, e)
mask = torch.randn(b, 1, l, l)
ref = imported_(x, mask)
hyp = reloaded_(x, mask)
self.assertEqual(ref, hyp)
# The whole Encoder Transformer
# TODO: Add mask pattern. Expected mask shapes and values are different.
b, l, e = 16, 3, config["hidden_size"]
x = torch.randn(b, l, e)
mask = torch.randn(b, 1, l, l)
ref = imported.encoder.transformer(x)
hyp = reloaded.encoder.transformer(x)
self.assertEqual(ref, hyp)
# Aux
if imported.aux is not None:
x = torch.randn(3, 10, config["hidden_size"])
ref = imported.aux(x)
hyp = reloaded.aux(x)
self.assertEqual(ref, hyp)
# The whole model
x = torch.randn(3, 1024)
ref, _ = imported(x)
hyp, _ = reloaded(x)
self.assertEqual(ref, hyp)
@PRETRAIN_CONFIGS
def test_recreate_pretrain(self, config, factory_func):
"""Imported models can be recreated via a factory function without Hugging Face transformers."""
imported = import_huggingface_model(self._get_model(config)).eval()
reloaded = factory_func()
reloaded.load_state_dict(imported.state_dict())
reloaded.eval()
self._test_recreate(imported, reloaded, config)
@FINETUNE_CONFIGS
def test_recreate_finetune(self, config, factory_func):
"""Imported models can be recreated via a factory function without Hugging Face transformers."""
imported = import_huggingface_model(self._get_model(config)).eval()
reloaded = factory_func(aux_num_out=imported.aux.out_features)
reloaded.load_state_dict(imported.state_dict())
reloaded.eval()
self._test_recreate(imported, reloaded, config)
| [
"torch.manual_seed",
"torch.randint",
"torch.randn",
"torch.arange"
] | 1.4.0 | popcornell/audio | 7b6b2d000023e2aa3365b769866c5f375e0d5fda |
1.0 | # coding=utf-8
# Copyright 2020 The Allen Institute for AI team and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Longformer model. """
import math
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss, MSELoss
from torch.nn import functional as F
from ...activations import ACT2FN, gelu
from ...file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from ...utils import logging
from .configuration_longformer import LongformerConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "LongformerConfig"
_TOKENIZER_FOR_DOC = "LongformerTokenizer"
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
"allenai/longformer-base-4096",
"allenai/longformer-large-4096",
"allenai/longformer-large-4096-finetuned-triviaqa",
"allenai/longformer-base-4096-extra.pos.embd.only",
"allenai/longformer-large-4096-extra.pos.embd.only",
# See all Longformer models at https://huggingface.co/models?filter=longformer
]
@dataclass
class LongformerBaseModelOutput(ModelOutput):
"""
Base class for Longformer's outputs, with potential hidden states, local and global attentions.
Args:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, x + attention_window + 1)`, where ``x`` is the number of tokens with global attention
mask.
Local attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token in the sequence to every token with
global attention (first ``x`` values) and to every token in the attention window (remaining
``attention_window + 1`` values). Note that the first ``x`` values refer to tokens with fixed positions in
the text, but the remaining ``attention_window + 1`` values refer to tokens with relative positions: the
attention weight of a token to itself is located at index ``x + attention_window / 2`` and the
``attention_window / 2`` preceding (succeeding) values are the attention weights to the ``attention_window
/ 2`` preceding (succeeding) tokens. If the attention window contains a token with global attention, the
attention weight at the corresponding index is set to 0; the value should be accessed from the first ``x``
attention weights. If a token has global attention, the attention weights to all other tokens in
:obj:`attentions` is set to 0, the values should be accessed from :obj:`global_attentions`.
global_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, x)`, where ``x`` is the number of tokens with global attention mask.
Global attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
"""
last_hidden_state: torch.FloatTensor
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
global_attentions: Optional[Tuple[torch.FloatTensor]] = None
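# Worked example of the indexing described in the docstring above (illustrative
# numbers): with attention_window=512 and x=1 token with global attention, each
# row of the local attention tensor has length x + attention_window + 1 = 514,
# and a token's attention weight to itself sits at index x + attention_window / 2 = 257.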
@dataclass
class LongformerBaseModelOutputWithPooling(ModelOutput):
"""
Base class for Longformer's outputs that also contains a pooling of the last hidden states.
Args:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
pooler_output (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, hidden_size)`):
Last layer hidden-state of the first token of the sequence (classification token) further processed by a
Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence
prediction (classification) objective during pretraining.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, x + attention_window + 1)`, where ``x`` is the number of tokens with global attention
mask.
Local attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token in the sequence to every token with
global attention (first ``x`` values) and to every token in the attention window (remaining
``attention_window + 1`` values). Note that the first ``x`` values refer to tokens with fixed positions in
the text, but the remaining ``attention_window + 1`` values refer to tokens with relative positions: the
attention weight of a token to itself is located at index ``x + attention_window / 2`` and the
``attention_window / 2`` preceding (succeeding) values are the attention weights to the ``attention_window
/ 2`` preceding (succeeding) tokens. If the attention window contains a token with global attention, the
attention weight at the corresponding index is set to 0; the value should be accessed from the first ``x``
attention weights. If a token has global attention, the attention weights to all other tokens in
:obj:`attentions` is set to 0, the values should be accessed from :obj:`global_attentions`.
global_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, x)`, where ``x`` is the number of tokens with global attention mask.
Global attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
"""
last_hidden_state: torch.FloatTensor
pooler_output: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
global_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class LongformerMaskedLMOutput(ModelOutput):
"""
Base class for masked language models outputs.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Masked language modeling (MLM) loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, x + attention_window + 1)`, where ``x`` is the number of tokens with global attention
mask.
Local attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token in the sequence to every token with
global attention (first ``x`` values) and to every token in the attention window (remaining
``attention_window + 1`` values). Note that the first ``x`` values refer to tokens with fixed positions in
the text, but the remaining ``attention_window + 1`` values refer to tokens with relative positions: the
attention weight of a token to itself is located at index ``x + attention_window / 2`` and the
``attention_window / 2`` preceding (succeeding) values are the attention weights to the ``attention_window
/ 2`` preceding (succeeding) tokens. If the attention window contains a token with global attention, the
attention weight at the corresponding index is set to 0; the value should be accessed from the first ``x``
attention weights. If a token has global attention, the attention weights to all other tokens in
:obj:`attentions` is set to 0, the values should be accessed from :obj:`global_attentions`.
global_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, x)`, where ``x`` is the number of tokens with global attention mask.
Global attention weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
global_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class LongformerQuestionAnsweringModelOutput(ModelOutput):
"""
Base class for outputs of question answering Longformer models.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
start_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`):
Span-start scores (before SoftMax).
end_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`):
Span-end scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, x + attention_window + 1)`, where ``x`` is the number of tokens with global attention
mask.
Local attention weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token in the sequence to every token with
global attention (first ``x`` values) and to every token in the attention window (remaining
``attention_window + 1`` values). Note that the first ``x`` values refer to tokens with fixed positions in
the text, but the remaining ``attention_window + 1`` values refer to tokens with relative positions: the
attention weight of a token to itself is located at index ``x + attention_window / 2`` and the
``attention_window / 2`` preceding (succeeding) values are the attention weights to the ``attention_window
/ 2`` preceding (succeeding) tokens. If the attention window contains a token with global attention, the
attention weight at the corresponding index is set to 0; the value should be accessed from the first ``x``
attention weights. If a token has global attention, the attention weights to all other tokens in
:obj:`attentions` are set to 0; the values should be accessed from :obj:`global_attentions`.
global_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, x)`, where ``x`` is the number of tokens with global attention mask.
Global attention weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
"""
loss: Optional[torch.FloatTensor] = None
start_logits: torch.FloatTensor = None
end_logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
global_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class LongformerSequenceClassifierOutput(ModelOutput):
"""
Base class for outputs of sentence classification models.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, x + attention_window + 1)`, where ``x`` is the number of tokens with global attention
mask.
Local attention weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token in the sequence to every token with
global attention (first ``x`` values) and to every token in the attention window (remaining
``attention_window + 1`` values). Note that the first ``x`` values refer to tokens with fixed positions in
the text, but the remaining ``attention_window + 1`` values refer to tokens with relative positions: the
attention weight of a token to itself is located at index ``x + attention_window / 2`` and the
``attention_window / 2`` preceding (succeeding) values are the attention weights to the ``attention_window
/ 2`` preceding (succeeding) tokens. If the attention window contains a token with global attention, the
attention weight at the corresponding index is set to 0; the value should be accessed from the first ``x``
attention weights. If a token has global attention, the attention weights to all other tokens in
:obj:`attentions` are set to 0; the values should be accessed from :obj:`global_attentions`.
global_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, x)`, where ``x`` is the number of tokens with global attention mask.
Global attention weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
global_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class LongformerMultipleChoiceModelOutput(ModelOutput):
"""
Base class for outputs of multiple choice Longformer models.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Classification loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):
Classification scores (before SoftMax). `num_choices` is the second dimension of the input
tensors (see `input_ids` above).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, x + attention_window + 1)`, where ``x`` is the number of tokens with global attention
mask.
Local attention weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token in the sequence to every token with
global attention (first ``x`` values) and to every token in the attention window (remaining
``attention_window + 1`` values). Note that the first ``x`` values refer to tokens with fixed positions in
the text, but the remaining ``attention_window + 1`` values refer to tokens with relative positions: the
attention weight of a token to itself is located at index ``x + attention_window / 2`` and the
``attention_window / 2`` preceding (succeeding) values are the attention weights to the ``attention_window
/ 2`` preceding (succeeding) tokens. If the attention window contains a token with global attention, the
attention weight at the corresponding index is set to 0; the value should be accessed from the first ``x``
attention weights. If a token has global attention, the attention weights to all other tokens in
:obj:`attentions` are set to 0; the values should be accessed from :obj:`global_attentions`.
global_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, x)`, where ``x`` is the number of tokens with global attention mask.
Global attention weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
global_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class LongformerTokenClassifierOutput(ModelOutput):
"""
Base class for outputs of token classification models.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Classification loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`):
Classification scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, x + attention_window + 1)`, where ``x`` is the number of tokens with global attention
mask.
Local attention weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token in the sequence to every token with
global attention (first ``x`` values) and to every token in the attention window (remaining
``attention_window + 1`` values). Note that the first ``x`` values refer to tokens with fixed positions in
the text, but the remaining ``attention_window + 1`` values refer to tokens with relative positions: the
attention weight of a token to itself is located at index ``x + attention_window / 2`` and the
``attention_window / 2`` preceding (succeeding) values are the attention weights to the ``attention_window
/ 2`` preceding (succeeding) tokens. If the attention window contains a token with global attention, the
attention weight at the corresponding index is set to 0; the value should be accessed from the first ``x``
attention weights. If a token has global attention, the attention weights to all other tokens in
:obj:`attentions` are set to 0; the values should be accessed from :obj:`global_attentions`.
global_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, x)`, where ``x`` is the number of tokens with global attention mask.
Global attention weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
global_attentions: Optional[Tuple[torch.FloatTensor]] = None
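# A hedged indexing sketch for the local/global attention layout described in the docstrings
# above (`model`, `input_ids`, `global_attention_mask`, and the chosen token index are
# illustrative assumptions, not part of this module):
#
#     >>> outputs = model(input_ids, global_attention_mask=global_attention_mask, output_attentions=True)
#     >>> local_attn = outputs.attentions[0]          # (batch_size, num_heads, seq_len, x + attention_window + 1)
#     >>> global_attn = outputs.global_attentions[0]  # (batch_size, num_heads, seq_len, x)
#     >>> x = int(global_attention_mask[0].sum())     # number of tokens with global attention
#     >>> w = 512                                     # config.attention_window for this layer (assumed)
#     >>> token_to_itself = local_attn[0, 0, 10, x + w // 2]  # weight of token 10 attending to itself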
def _get_question_end_index(input_ids, sep_token_id):
"""
Computes the index of the first occurrence of `sep_token_id`.
"""
sep_token_indices = (input_ids == sep_token_id).nonzero()
batch_size = input_ids.shape[0]
assert sep_token_indices.shape[1] == 2, "`input_ids` should have two dimensions"
assert (
sep_token_indices.shape[0] == 3 * batch_size
), f"There should be exactly three separator tokens: {sep_token_id} in every sample for questions answering. You might also consider to set `global_attention_mask` manually in the forward function to avoid this error."
return sep_token_indices.view(batch_size, 3, 2)[:, 0, 1]
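# Hedged example with illustrative token ids: for input_ids = [[0, 9, 10, 2, 2, 11, 12, 2]] and
# sep_token_id = 2 there are exactly three separator tokens per sample (RoBERTa-style
# `<s> question </s></s> context </s>` encoding), and the function returns tensor([3]),
# the position of the first separator.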
def _compute_global_attention_mask(input_ids, sep_token_id, before_sep_token=True):
"""
Computes global attention mask by putting attention on all tokens before `sep_token_id` if
`before_sep_token` is True, else on all tokens after `sep_token_id`.
"""
question_end_index = _get_question_end_index(input_ids, sep_token_id)
question_end_index = question_end_index.unsqueeze(dim=1) # size: batch_size x 1
# bool attention mask with True in locations of global attention
attention_mask = torch.arange(input_ids.shape[1], device=input_ids.device)
if before_sep_token is True:
attention_mask = (attention_mask.expand_as(input_ids) < question_end_index).to(torch.uint8)
else:
# the last token is a separator token and should not be counted, and there are two separator tokens in the middle
attention_mask = (attention_mask.expand_as(input_ids) > (question_end_index + 1)).to(torch.uint8) * (
attention_mask.expand_as(input_ids) < input_ids.shape[-1]
).to(torch.uint8)
return attention_mask
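# Hedged usage sketch (the tokenizer and the QA-style input below are assumptions, not part of
# this module): give global attention to every question token, i.e. everything before the first
# separator.
#
#     >>> encoding = tokenizer(question, context, return_tensors="pt")
#     >>> global_attention_mask = _compute_global_attention_mask(
#     ...     encoding["input_ids"], tokenizer.sep_token_id, before_sep_token=True
#     ... )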
# Copied from transformers.models.roberta.modeling_roberta.create_position_ids_from_input_ids
def create_position_ids_from_input_ids(input_ids, padding_idx):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
input_ids: torch.Tensor
padding_idx: int
Returns: torch.Tensor
"""
# The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
mask = input_ids.ne(padding_idx).int()
incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask
return incremental_indices.long() + padding_idx
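# Hedged worked example (padding_idx = 1, as in RoBERTa-style configs, is an assumption):
#
#     >>> input_ids = torch.tensor([[0, 31414, 232, 2, 1, 1]])  # last two tokens are padding
#     >>> create_position_ids_from_input_ids(input_ids, padding_idx=1)
#     tensor([[2, 3, 4, 5, 1, 1]])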
class LongformerEmbeddings(nn.Module):
"""
Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
"""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
self.padding_idx = config.pad_token_id
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
)
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
if position_ids is None:
if input_ids is not None:
# Create the position ids from the input token ids. Any padded tokens remain padded.
position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx).to(input_ids.device)
else:
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
def create_position_ids_from_inputs_embeds(self, inputs_embeds):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(
self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
)
return position_ids.unsqueeze(0).expand(input_shape)
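# Hedged worked example for the method above (padding_idx = 1 is an assumption): for
# inputs_embeds of shape (1, 4, hidden_size) the generated position ids are tensor([[2, 3, 4, 5]]),
# i.e. sequential ids starting at padding_idx + 1, since padded positions cannot be inferred from
# embeddings alone.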
class LongformerSelfAttention(nn.Module):
def __init__(self, config, layer_id):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads)
)
self.num_heads = config.num_attention_heads
self.head_dim = int(config.hidden_size / config.num_attention_heads)
self.embed_dim = config.hidden_size
self.query = nn.Linear(config.hidden_size, self.embed_dim)
self.key = nn.Linear(config.hidden_size, self.embed_dim)
self.value = nn.Linear(config.hidden_size, self.embed_dim)
# separate projection layers for tokens with global attention
self.query_global = nn.Linear(config.hidden_size, self.embed_dim)
self.key_global = nn.Linear(config.hidden_size, self.embed_dim)
self.value_global = nn.Linear(config.hidden_size, self.embed_dim)
self.dropout = config.attention_probs_dropout_prob
self.layer_id = layer_id
attention_window = config.attention_window[self.layer_id]
assert (
attention_window % 2 == 0
), f"`attention_window` for layer {self.layer_id} has to be an even value. Given {attention_window}"
assert (
attention_window > 0
), f"`attention_window` for layer {self.layer_id} has to be positive. Given {attention_window}"
self.one_sided_attn_window_size = attention_window // 2
def forward(
self, hidden_states, attention_mask=None, is_index_masked=None, is_index_global_attn=None, is_global_attn=None
):
"""
LongformerSelfAttention expects `len(hidden_states)` to be a multiple of `attention_window`. Padding to
`attention_window` happens in LongformerModel.forward to avoid redoing the padding on each layer.
The `attention_mask` is changed in `LongformerModel.forward` from 0, 1, 2 to:
    -ve: no attention
    0: local attention
    +ve: global attention
"""
hidden_states = hidden_states.transpose(0, 1)
# project hidden states
query_vectors = self.query(hidden_states)
key_vectors = self.key(hidden_states)
value_vectors = self.value(hidden_states)
seq_len, batch_size, embed_dim = hidden_states.size()
assert (
embed_dim == self.embed_dim
), f"hidden_states should have embed_dim = {self.embed_dim}, but has {embed_dim}"
# normalize query
query_vectors /= math.sqrt(self.head_dim)
query_vectors = query_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1)
key_vectors = key_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1)
attn_scores = self._sliding_chunks_query_key_matmul(
query_vectors, key_vectors, self.one_sided_attn_window_size
)
# values to pad for attention probs
remove_from_windowed_attention_mask = (attention_mask != 0)[:, :, None, None]
# cast to fp32/fp16 then replace 1's with a large negative value (effectively -inf)
float_mask = remove_from_windowed_attention_mask.type_as(query_vectors).masked_fill(
remove_from_windowed_attention_mask, -10000.0
)
# diagonal mask with zeros everywhere and a large negative value in place of padding
diagonal_mask = self._sliding_chunks_query_key_matmul(
float_mask.new_ones(size=float_mask.size()), float_mask, self.one_sided_attn_window_size
)
# pad local attention probs
attn_scores += diagonal_mask
assert list(attn_scores.size()) == [
batch_size,
seq_len,
self.num_heads,
self.one_sided_attn_window_size * 2 + 1,
], f"local_attn_probs should be of size ({batch_size}, {seq_len}, {self.num_heads}, {self.one_sided_attn_window_size * 2 + 1}), but is of size {attn_scores.size()}"
# compute local attention probs from global attention keys and concat over window dim
if is_global_attn:
# compute global attn indices required throughout the forward fn
(
max_num_global_attn_indices,
is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero,
) = self._get_global_attn_indices(is_index_global_attn)
# calculate global attn probs from global key
global_key_attn_scores = self._concat_with_global_key_attn_probs(
query_vectors=query_vectors,
key_vectors=key_vectors,
max_num_global_attn_indices=max_num_global_attn_indices,
is_index_global_attn_nonzero=is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero,
)
# concat to local_attn_probs
# (batch_size, seq_len, num_heads, extra attention count + 2*window+1)
attn_scores = torch.cat((global_key_attn_scores, attn_scores), dim=-1)
# free memory
del global_key_attn_scores
local_attn_probs_fp32 = F.softmax(attn_scores, dim=-1, dtype=torch.float32) # use fp32 for numerical stability
local_attn_probs = local_attn_probs_fp32.type_as(attn_scores)
# free memory
del local_attn_probs_fp32
# softmax sometimes inserts NaN if all positions are masked; replace them with 0
local_attn_probs = torch.masked_fill(local_attn_probs, is_index_masked[:, :, None, None], 0.0)
# apply dropout
local_attn_probs = F.dropout(local_attn_probs, p=self.dropout, training=self.training)
value_vectors = value_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1)
# compute local attention output with global attention value and add
if is_global_attn:
# compute sum of global and local attn
attn_output = self._compute_attn_output_with_global_indices(
value_vectors=value_vectors,
attn_probs=local_attn_probs,
max_num_global_attn_indices=max_num_global_attn_indices,
is_index_global_attn_nonzero=is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
)
else:
# compute local attn only
attn_output = self._sliding_chunks_matmul_attn_probs_value(
local_attn_probs, value_vectors, self.one_sided_attn_window_size
)
assert attn_output.size() == (batch_size, seq_len, self.num_heads, self.head_dim), "Unexpected size"
attn_output = attn_output.transpose(0, 1).reshape(seq_len, batch_size, embed_dim).contiguous()
# compute value for global attention and overwrite to attention output
# TODO: remove the redundant computation
if is_global_attn:
global_attn_output, global_attn_probs = self._compute_global_attn_output_from_hidden(
hidden_states=hidden_states,
max_num_global_attn_indices=max_num_global_attn_indices,
is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
is_index_global_attn_nonzero=is_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero,
is_index_masked=is_index_masked,
)
# get only non zero global attn output
nonzero_global_attn_output = global_attn_output[
is_local_index_global_attn_nonzero[0], :, is_local_index_global_attn_nonzero[1]
]
# overwrite values with global attention
attn_output[is_index_global_attn_nonzero[::-1]] = nonzero_global_attn_output.view(
len(is_local_index_global_attn_nonzero[0]), -1
)
# The attention weights for tokens with global attention are
# just filler values, they were never used to compute the output.
# Fill with 0 now, the correct values are in 'global_attn_probs'.
local_attn_probs[is_index_global_attn_nonzero] = 0
outputs = (attn_output.transpose(0, 1), local_attn_probs)
return outputs + (global_attn_probs,) if is_global_attn else outputs
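# Hedged shape sketch for the values returned above (derived from the reshapes in `forward`):
#   attn_output (after the final transpose):      (batch_size, seq_len, embed_dim)
#   local_attn_probs:                             (batch_size, seq_len, num_heads, 2 * one_sided_attn_window_size + 1),
#                                                 with max_num_global_attn_indices extra leading columns when global attention is used
#   global_attn_probs (only when is_global_attn): (batch_size, num_heads, max_num_global_attn_indices, seq_len)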
@staticmethod
def _pad_and_transpose_last_two_dims(hidden_states_padded, padding):
"""pads rows and then flips rows and columns"""
hidden_states_padded = F.pad(
hidden_states_padded, padding
) # padding value is not important because it will be overwritten
hidden_states_padded = hidden_states_padded.view(
*hidden_states_padded.size()[:-2], hidden_states_padded.size(-1), hidden_states_padded.size(-2)
)
return hidden_states_padded
@staticmethod
def _pad_and_diagonalize(chunked_hidden_states):
"""
shift every row 1 step right, converting columns into diagonals.
Example::
chunked_hidden_states: [ 0.4983, 2.6918, -0.0071, 1.0492,
-1.8348, 0.7672, 0.2986, 0.0285,
-0.7584, 0.4206, -0.0405, 0.1599,
2.0514, -1.1600, 0.5372, 0.2629 ]
window_overlap = num_rows = 4
(pad & diagonalize) =>
[ 0.4983, 2.6918, -0.0071, 1.0492, 0.0000, 0.0000, 0.0000
0.0000, -1.8348, 0.7672, 0.2986, 0.0285, 0.0000, 0.0000
0.0000, 0.0000, -0.7584, 0.4206, -0.0405, 0.1599, 0.0000
0.0000, 0.0000, 0.0000, 2.0514, -1.1600, 0.5372, 0.2629 ]
"""
total_num_heads, num_chunks, window_overlap, hidden_dim = chunked_hidden_states.size()
chunked_hidden_states = F.pad(
chunked_hidden_states, (0, window_overlap + 1)
) # total_num_heads x num_chunks x window_overlap x (hidden_dim+window_overlap+1). Padding value is not important because it'll be overwritten
chunked_hidden_states = chunked_hidden_states.view(
total_num_heads, num_chunks, -1
) # total_num_heads x num_chunks x window_overlap * (hidden_dim + window_overlap + 1)
chunked_hidden_states = chunked_hidden_states[
:, :, :-window_overlap
] # total_num_heads x num_chunks x window_overlap * (hidden_dim + window_overlap)
chunked_hidden_states = chunked_hidden_states.view(
total_num_heads, num_chunks, window_overlap, window_overlap + hidden_dim
) # total_num_heads x num_chunks x window_overlap x (hidden_dim + window_overlap)
chunked_hidden_states = chunked_hidden_states[:, :, :, :-1]  # drop the trailing all-padding column -> total_num_heads x num_chunks x window_overlap x (hidden_dim + window_overlap - 1)
return chunked_hidden_states
@staticmethod
def _chunk(hidden_states, window_overlap):
"""convert into overlapping chunks. Chunk size = 2w, overlap size = w"""
# non-overlapping chunks of size = 2w
hidden_states = hidden_states.view(
hidden_states.size(0),
hidden_states.size(1) // (window_overlap * 2),
window_overlap * 2,
hidden_states.size(2),
)
# use `as_strided` to make the chunks overlap with an overlap size = window_overlap
chunk_size = list(hidden_states.size())
chunk_size[1] = chunk_size[1] * 2 - 1
chunk_stride = list(hidden_states.stride())
chunk_stride[1] = chunk_stride[1] // 2
return hidden_states.as_strided(size=chunk_size, stride=chunk_stride)
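# Hedged shape sketch for `_chunk` (illustrative numbers): with hidden_states of shape
# (batch_size * num_heads, seq_len, head_dim) = (12, 1024, 64) and window_overlap = 256,
# the non-overlapping view has 1024 // 512 = 2 chunks of length 512, and the strided result has
# 2 * 2 - 1 = 3 overlapping chunks of shape (12, 3, 512, 64), each starting 256 tokens after the
# previous one.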
@staticmethod
def _mask_invalid_locations(input_tensor, affected_seq_len) -> None:
beginning_mask_2d = input_tensor.new_ones(affected_seq_len, affected_seq_len + 1).tril().flip(dims=[0])
beginning_mask = beginning_mask_2d[None, :, None, :]
ending_mask = beginning_mask.flip(dims=(1, 3))
beginning_input = input_tensor[:, :affected_seq_len, :, : affected_seq_len + 1]
beginning_mask = beginning_mask.expand(beginning_input.size())
beginning_input.masked_fill_(beginning_mask == 1, -float("inf")) # `== 1` converts to bool or uint8
ending_input = input_tensor[:, -affected_seq_len:, :, -(affected_seq_len + 1) :]
ending_mask = ending_mask.expand(ending_input.size())
ending_input.masked_fill_(ending_mask == 1, -float("inf")) # `== 1` converts to bool or uint8
def _sliding_chunks_query_key_matmul(self, query: torch.Tensor, key: torch.Tensor, window_overlap: int):
"""
Matrix multiplication of query and key tensors using a sliding window attention pattern. This
implementation splits the input into overlapping chunks of size 2w (e.g. 512 for pretrained Longformer)
with an overlap of size window_overlap.
"""
batch_size, seq_len, num_heads, head_dim = query.size()
assert (
seq_len % (window_overlap * 2) == 0
), f"Sequence length should be multiple of {window_overlap * 2}. Given {seq_len}"
assert query.size() == key.size()
chunks_count = seq_len // window_overlap - 1
# group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size window_overlap * 2
query = query.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim)
key = key.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim)
chunked_query = self._chunk(query, window_overlap)
chunked_key = self._chunk(key, window_overlap)
# matrix multiplication
# bcxd: batch_size * num_heads x chunks x 2window_overlap x head_dim
# bcyd: batch_size * num_heads x chunks x 2window_overlap x head_dim
# bcxy: batch_size * num_heads x chunks x 2window_overlap x window_overlap
chunked_attention_scores = torch.einsum("bcxd,bcyd->bcxy", (chunked_query, chunked_key)) # multiply
# convert diagonals into columns
diagonal_chunked_attention_scores = self._pad_and_transpose_last_two_dims(
chunked_attention_scores, padding=(0, 0, 0, 1)
)
# allocate space for the overall attention matrix where the chunks are combined. The last dimension
# has (window_overlap * 2 + 1) columns. The first (window_overlap) columns are the window_overlap lower triangles (attention from a word to
# window_overlap previous words). The following column is attention score from each word to itself, then
# followed by window_overlap columns for the upper triangle.
diagonal_attention_scores = diagonal_chunked_attention_scores.new_empty(
(batch_size * num_heads, chunks_count + 1, window_overlap, window_overlap * 2 + 1)
)
# copy parts from diagonal_chunked_attention_scores into the combined matrix of attentions
# - copying the main diagonal and the upper triangle
diagonal_attention_scores[:, :-1, :, window_overlap:] = diagonal_chunked_attention_scores[
:, :, :window_overlap, : window_overlap + 1
]
diagonal_attention_scores[:, -1, :, window_overlap:] = diagonal_chunked_attention_scores[
:, -1, window_overlap:, : window_overlap + 1
]
# - copying the lower triangle
diagonal_attention_scores[:, 1:, :, :window_overlap] = diagonal_chunked_attention_scores[
:, :, -(window_overlap + 1) : -1, window_overlap + 1 :
]
diagonal_attention_scores[:, 0, 1:window_overlap, 1:window_overlap] = diagonal_chunked_attention_scores[
:, 0, : window_overlap - 1, 1 - window_overlap :
]
# separate batch_size and num_heads dimensions again
diagonal_attention_scores = diagonal_attention_scores.view(
batch_size, num_heads, seq_len, 2 * window_overlap + 1
).transpose(2, 1)
self._mask_invalid_locations(diagonal_attention_scores, window_overlap)
return diagonal_attention_scores
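# Hedged shape sketch (illustrative numbers): for query/key of shape
# (batch_size, seq_len, num_heads, head_dim) = (1, 1024, 12, 64) and window_overlap = 256,
# the returned scores have shape (batch_size, seq_len, num_heads, 2 * window_overlap + 1)
# = (1, 1024, 12, 513): 256 columns for preceding tokens, 1 for the token itself, and 256 for
# succeeding tokens.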
def _sliding_chunks_matmul_attn_probs_value(
self, attn_probs: torch.Tensor, value: torch.Tensor, window_overlap: int
):
"""
Same as _sliding_chunks_query_key_matmul but for attn_probs and value tensors. Returned tensor will be of the
same shape as `attn_probs`
"""
batch_size, seq_len, num_heads, head_dim = value.size()
assert seq_len % (window_overlap * 2) == 0
assert attn_probs.size()[:3] == value.size()[:3]
assert attn_probs.size(3) == 2 * window_overlap + 1
chunks_count = seq_len // window_overlap - 1
# group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size 2 window overlap
chunked_attn_probs = attn_probs.transpose(1, 2).reshape(
batch_size * num_heads, seq_len // window_overlap, window_overlap, 2 * window_overlap + 1
)
# group batch_size and num_heads dimensions into one
value = value.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim)
# pad seq_len with window_overlap at the beginning of the sequence and another window_overlap at the end
padded_value = F.pad(value, (0, 0, window_overlap, window_overlap), value=-1)
# chunk padded_value into chunks of size 3 window overlap and an overlap of size window overlap
chunked_value_size = (batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim)
chunked_value_stride = padded_value.stride()
chunked_value_stride = (
chunked_value_stride[0],
window_overlap * chunked_value_stride[1],
chunked_value_stride[1],
chunked_value_stride[2],
)
chunked_value = padded_value.as_strided(size=chunked_value_size, stride=chunked_value_stride)
chunked_attn_probs = self._pad_and_diagonalize(chunked_attn_probs)
context = torch.einsum("bcwd,bcdh->bcwh", (chunked_attn_probs, chunked_value))
return context.view(batch_size, num_heads, seq_len, head_dim).transpose(1, 2)
@staticmethod
def _get_global_attn_indices(is_index_global_attn):
""" compute global attn indices required throughout forward pass """
# helper variable
num_global_attn_indices = is_index_global_attn.long().sum(dim=1)
# max number of global attn indices in batch
max_num_global_attn_indices = num_global_attn_indices.max()
# indices of global attn
is_index_global_attn_nonzero = is_index_global_attn.nonzero(as_tuple=True)
# helper variable
is_local_index_global_attn = torch.arange(
max_num_global_attn_indices, device=is_index_global_attn.device
) < num_global_attn_indices.unsqueeze(dim=-1)
# location of the non-padding values within global attention indices
is_local_index_global_attn_nonzero = is_local_index_global_attn.nonzero(as_tuple=True)
# location of the padding values within global attention indices
is_local_index_no_global_attn_nonzero = (is_local_index_global_attn == 0).nonzero(as_tuple=True)
return (
max_num_global_attn_indices,
is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero,
)
def _concat_with_global_key_attn_probs(
self,
key_vectors,
query_vectors,
max_num_global_attn_indices,
is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero,
):
batch_size = key_vectors.shape[0]
# create only global key vectors
key_vectors_only_global = key_vectors.new_zeros(
batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim
)
key_vectors_only_global[is_local_index_global_attn_nonzero] = key_vectors[is_index_global_attn_nonzero]
# (batch_size, seq_len, num_heads, max_num_global_attn_indices)
attn_probs_from_global_key = torch.einsum("blhd,bshd->blhs", (query_vectors, key_vectors_only_global))
attn_probs_from_global_key[
is_local_index_no_global_attn_nonzero[0], :, :, is_local_index_no_global_attn_nonzero[1]
] = -10000.0
return attn_probs_from_global_key
def _compute_attn_output_with_global_indices(
self,
value_vectors,
attn_probs,
max_num_global_attn_indices,
is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero,
):
batch_size = attn_probs.shape[0]
# cut local attn probs to global only
attn_probs_only_global = attn_probs.narrow(-1, 0, max_num_global_attn_indices)
# get value vectors for global only
value_vectors_only_global = value_vectors.new_zeros(
batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim
)
value_vectors_only_global[is_local_index_global_attn_nonzero] = value_vectors[is_index_global_attn_nonzero]
# use `matmul` because `einsum` crashes sometimes with fp16
# attn = torch.einsum('blhs,bshd->blhd', (selected_attn_probs, selected_v))
# compute attn output only global
attn_output_only_global = torch.matmul(
attn_probs_only_global.transpose(1, 2), value_vectors_only_global.transpose(1, 2)
).transpose(1, 2)
# reshape attn probs
attn_probs_without_global = attn_probs.narrow(
-1, max_num_global_attn_indices, attn_probs.size(-1) - max_num_global_attn_indices
).contiguous()
# compute attn output with global
attn_output_without_global = self._sliding_chunks_matmul_attn_probs_value(
attn_probs_without_global, value_vectors, self.one_sided_attn_window_size
)
return attn_output_only_global + attn_output_without_global
def _compute_global_attn_output_from_hidden(
self,
hidden_states,
max_num_global_attn_indices,
is_local_index_global_attn_nonzero,
is_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero,
is_index_masked,
):
seq_len, batch_size = hidden_states.shape[:2]
# prepare global hidden states
global_attn_hidden_states = hidden_states.new_zeros(max_num_global_attn_indices, batch_size, self.embed_dim)
global_attn_hidden_states[is_local_index_global_attn_nonzero[::-1]] = hidden_states[
is_index_global_attn_nonzero[::-1]
]
# global key, query, value
global_query_vectors_only_global = self.query_global(global_attn_hidden_states)
global_key_vectors = self.key_global(hidden_states)
global_value_vectors = self.value_global(hidden_states)
# normalize
global_query_vectors_only_global /= math.sqrt(self.head_dim)
# reshape
global_query_vectors_only_global = (
global_query_vectors_only_global.contiguous()
.view(max_num_global_attn_indices, batch_size * self.num_heads, self.head_dim)
.transpose(0, 1)
) # (batch_size * self.num_heads, max_num_global_attn_indices, head_dim)
global_key_vectors = (
global_key_vectors.contiguous().view(-1, batch_size * self.num_heads, self.head_dim).transpose(0, 1)
) # batch_size * self.num_heads, seq_len, head_dim)
global_value_vectors = (
global_value_vectors.contiguous().view(-1, batch_size * self.num_heads, self.head_dim).transpose(0, 1)
) # batch_size * self.num_heads, seq_len, head_dim)
# compute attn scores
global_attn_scores = torch.bmm(global_query_vectors_only_global, global_key_vectors.transpose(1, 2))
assert list(global_attn_scores.size()) == [
batch_size * self.num_heads,
max_num_global_attn_indices,
seq_len,
], f"global_attn_scores have the wrong size. Size should be {(batch_size * self.num_heads, max_num_global_attn_indices, seq_len)}, but is {global_attn_scores.size()}."
global_attn_scores = global_attn_scores.view(batch_size, self.num_heads, max_num_global_attn_indices, seq_len)
global_attn_scores[
is_local_index_no_global_attn_nonzero[0], :, is_local_index_no_global_attn_nonzero[1], :
] = -10000.0
global_attn_scores = global_attn_scores.masked_fill(
is_index_masked[:, None, None, :],
-10000.0,
)
global_attn_scores = global_attn_scores.view(batch_size * self.num_heads, max_num_global_attn_indices, seq_len)
# compute global attn probs
global_attn_probs_float = F.softmax(
global_attn_scores, dim=-1, dtype=torch.float32
) # use fp32 for numerical stability
global_attn_probs = F.dropout(
global_attn_probs_float.type_as(global_attn_scores), p=self.dropout, training=self.training
)
# global attn output
global_attn_output = torch.bmm(global_attn_probs, global_value_vectors)
assert list(global_attn_output.size()) == [
batch_size * self.num_heads,
max_num_global_attn_indices,
self.head_dim,
], f"global_attn_output tensor has the wrong size. Size should be {(batch_size * self.num_heads, max_num_global_attn_indices, self.head_dim)}, but is {global_attn_output.size()}."
global_attn_probs = global_attn_probs.view(batch_size, self.num_heads, max_num_global_attn_indices, seq_len)
global_attn_output = global_attn_output.view(
batch_size, self.num_heads, max_num_global_attn_indices, self.head_dim
)
return global_attn_output, global_attn_probs
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput
class LongformerSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class LongformerAttention(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.self = LongformerSelfAttention(config, layer_id)
self.output = LongformerSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_heads, self.self.head_dim, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_heads = self.self.num_heads - len(heads)
self.self.all_head_size = self.self.head_dim * self.self.num_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self, hidden_states, attention_mask=None, is_index_masked=None, is_index_global_attn=None, is_global_attn=None
):
self_outputs = self.self(
hidden_states,
attention_mask=attention_mask,
is_index_masked=is_index_masked,
is_index_global_attn=is_index_global_attn,
is_global_attn=is_global_attn,
)
attn_output = self.output(self_outputs[0], hidden_states)
outputs = (attn_output,) + self_outputs[1:]
return outputs
# Copied from transformers.models.bert.modeling_bert.BertIntermediate
class LongformerIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOutput
class LongformerOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class LongformerLayer(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.attention = LongformerAttention(config, layer_id)
self.intermediate = LongformerIntermediate(config)
self.output = LongformerOutput(config)
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
def forward(
self, hidden_states, attention_mask=None, is_index_masked=None, is_index_global_attn=None, is_global_attn=None
):
self_attn_outputs = self.attention(
hidden_states,
attention_mask=attention_mask,
is_index_masked=is_index_masked,
is_index_global_attn=is_index_global_attn,
is_global_attn=is_global_attn,
)
attn_output = self_attn_outputs[0]
outputs = self_attn_outputs[1:]
layer_output = apply_chunking_to_forward(
self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attn_output
)
outputs = (layer_output,) + outputs
return outputs
def ff_chunk(self, attn_output):
intermediate_output = self.intermediate(attn_output)
layer_output = self.output(intermediate_output, attn_output)
return layer_output
class LongformerEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([LongformerLayer(config, layer_id=i) for i in range(config.num_hidden_layers)])
def forward(
self,
hidden_states,
attention_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
is_index_masked = attention_mask < 0
is_index_global_attn = attention_mask > 0
is_global_attn = is_index_global_attn.flatten().any().item()
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None # All local attentions.
all_global_attentions = () if (output_attentions and is_global_attn) else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if getattr(self.config, "gradient_checkpointing", False):
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, is_global_attn)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
is_index_masked,
is_index_global_attn,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask=attention_mask,
is_index_masked=is_index_masked,
is_index_global_attn=is_index_global_attn,
is_global_attn=is_global_attn,
)
hidden_states = layer_outputs[0]
if output_attentions:
# bzs x seq_len x num_attn_heads x (num_global_attn + attention_window_len + 1) => bzs x num_attn_heads x seq_len x (num_global_attn + attention_window_len + 1)
all_attentions = all_attentions + (layer_outputs[1].transpose(1, 2),)
if is_global_attn:
# bzs x num_attn_heads x num_global_attn x seq_len => bzs x num_attn_heads x seq_len x num_global_attn
all_global_attentions = all_global_attentions + (layer_outputs[2].transpose(2, 3),)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v for v in [hidden_states, all_hidden_states, all_attentions, all_global_attentions] if v is not None
)
return LongformerBaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_attentions,
global_attentions=all_global_attentions,
)
# Copied from transformers.models.bert.modeling_bert.BertPooler
class LongformerPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
# Copied from transformers.models.roberta.modeling_roberta.RobertaLMHead with Roberta->Longformer
class LongformerLMHead(nn.Module):
"""Longformer Head for masked language modeling."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, features, **kwargs):
x = self.dense(features)
x = gelu(x)
x = self.layer_norm(x)
# project back to size of vocabulary with bias
x = self.decoder(x)
return x
class LongformerPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = LongformerConfig
base_model_prefix = "longformer"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
""" Initialize the weights """
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
LONGFORMER_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.LongformerConfig`): Model configuration class with all the parameters of the
model. Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
LONGFORMER_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.LongformerTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
global_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to decide the attention given on each token, local attention or global attention. Tokens with global
attention attend to all other tokens, and all other tokens attend to them. This is important for
task-specific finetuning because it makes the model more flexible at representing the task. For example,
for classification, the <s> token should be given global attention. For QA, all question tokens should also
have global attention. Please refer to the `Longformer paper <https://arxiv.org/abs/2004.05150>`__ for more
details. Mask values selected in ``[0, 1]``:
- 0 for local attention (a sliding window attention),
- 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare Longformer Model outputting raw hidden-states without any specific head on top.",
LONGFORMER_START_DOCSTRING,
)
class LongformerModel(LongformerPreTrainedModel):
"""
This class copied code from :class:`~transformers.RobertaModel` and overwrote standard self-attention with
longformer self-attention to provide the ability to process long sequences following the self-attention approach
described in `Longformer: the Long-Document Transformer <https://arxiv.org/abs/2004.05150>`__ by Iz Beltagy,
Matthew E. Peters, and Arman Cohan. Longformer self-attention combines a local (sliding window) and global
attention to extend to long documents without the O(n^2) increase in memory and compute.
The self-attention module :obj:`LongformerSelfAttention` implemented here supports the combination of local and
global attention but it lacks support for autoregressive attention and dilated attention. Autoregressive and
dilated attention are more relevant for autoregressive language modeling than finetuning on downstream tasks.
A future release will add support for autoregressive attention, but support for dilated attention requires a
custom CUDA kernel to be memory- and compute-efficient.
"""
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
if isinstance(config.attention_window, int):
assert config.attention_window % 2 == 0, "`config.attention_window` has to be an even value"
assert config.attention_window > 0, "`config.attention_window` has to be positive"
config.attention_window = [config.attention_window] * config.num_hidden_layers # one value per layer
else:
assert len(config.attention_window) == config.num_hidden_layers, (
"`len(config.attention_window)` should equal `config.num_hidden_layers`. "
f"Expected {config.num_hidden_layers}, given {len(config.attention_window)}"
)
self.embeddings = LongformerEmbeddings(config)
self.encoder = LongformerEncoder(config)
self.pooler = LongformerPooler(config) if add_pooling_layer else None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def _pad_to_window_size(
self,
input_ids: torch.Tensor,
attention_mask: torch.Tensor,
token_type_ids: torch.Tensor,
position_ids: torch.Tensor,
inputs_embeds: torch.Tensor,
pad_token_id: int,
):
"""A helper function to pad tokens and mask to work with implementation of Longformer self-attention."""
# padding
attention_window = (
self.config.attention_window
if isinstance(self.config.attention_window, int)
else max(self.config.attention_window)
)
assert attention_window % 2 == 0, f"`attention_window` should be an even value. Given {attention_window}"
input_shape = input_ids.shape if input_ids is not None else inputs_embeds.shape
batch_size, seq_len = input_shape[:2]
padding_len = (attention_window - seq_len % attention_window) % attention_window
if padding_len > 0:
logger.info(
"Input ids are automatically padded from {} to {} to be a multiple of `config.attention_window`: {}".format(
seq_len, seq_len + padding_len, attention_window
)
)
if input_ids is not None:
input_ids = F.pad(input_ids, (0, padding_len), value=pad_token_id)
if position_ids is not None:
# pad with position_id = pad_token_id as in modeling_roberta.RobertaEmbeddings
position_ids = F.pad(position_ids, (0, padding_len), value=pad_token_id)
if inputs_embeds is not None:
input_ids_padding = inputs_embeds.new_full(
(batch_size, padding_len),
self.config.pad_token_id,
dtype=torch.long,
)
inputs_embeds_padding = self.embeddings(input_ids_padding)
inputs_embeds = torch.cat([inputs_embeds, inputs_embeds_padding], dim=-2)
attention_mask = F.pad(attention_mask, (0, padding_len), value=False) # no attention on the padding tokens
token_type_ids = F.pad(token_type_ids, (0, padding_len), value=0) # pad with token_type_id = 0
return padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds
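# Hedged padding arithmetic (illustrative numbers): with attention_window = 512 and
# seq_len = 1300, padding_len = (512 - 1300 % 512) % 512 = (512 - 276) % 512 = 236, so the inputs
# are padded to 1536 = 3 * 512; the trailing `% attention_window` keeps padding_len at 0 when
# seq_len is already a multiple of the window.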
def _merge_to_attention_mask(self, attention_mask: torch.Tensor, global_attention_mask: torch.Tensor):
# longformer self attention expects attention mask to have 0 (no attn), 1 (local attn), 2 (global attn)
# (global_attention_mask + 1) => 1 for local attention, 2 for global attention
# => final attention_mask => 0 for no attention, 1 for local attention, 2 for global attention
if attention_mask is not None:
attention_mask = attention_mask * (global_attention_mask + 1)
else:
# simply use `global_attention_mask` as `attention_mask`
# if no `attention_mask` is given
attention_mask = global_attention_mask + 1
return attention_mask
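# Worked example (illustrative note, not part of the original file):
#   attention_mask        = [1, 1, 1, 0]   (1 = attend, 0 = padding)
#   global_attention_mask = [1, 0, 0, 0]   (1 = global token)
#   merged                = attention_mask * (global_attention_mask + 1) = [2, 1, 1, 0]
# i.e. 0 = no attention, 1 = local attention, 2 = global attention.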
@add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=LongformerBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
global_attention_mask=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Returns:
Examples::
>>> import torch
>>> from transformers import LongformerModel, LongformerTokenizer
>>> model = LongformerModel.from_pretrained('allenai/longformer-base-4096')
>>> tokenizer = LongformerTokenizer.from_pretrained('allenai/longformer-base-4096')
>>> SAMPLE_TEXT = ' '.join(['Hello world! '] * 1000) # long input document
>>> input_ids = torch.tensor(tokenizer.encode(SAMPLE_TEXT)).unsqueeze(0) # batch of size 1
>>> # Attention mask values -- 0: no attention, 1: local attention, 2: global attention
>>> attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=input_ids.device) # initialize to local attention
>>> global_attention_mask = torch.zeros(input_ids.shape, dtype=torch.long, device=input_ids.device) # initialize global attention to be deactivated for all tokens
>>> global_attention_mask[:, [1, 4, 21,]] = 1 # Set global attention to random tokens for the sake of this example
... # Usually, set global attention based on the task. For example,
... # classification: the <s> token
... # QA: question tokens
... # LM: potentially on the beginning of sentences and paragraphs
>>> outputs = model(input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask)
>>> sequence_output = outputs.last_hidden_state
>>> pooled_output = outputs.pooler_output
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# merge `global_attention_mask` and `attention_mask`
if global_attention_mask is not None:
attention_mask = self._merge_to_attention_mask(attention_mask, global_attention_mask)
padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds = self._pad_to_window_size(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
pad_token_id=self.config.pad_token_id,
)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)[
:, 0, 0, :
]
embedding_output = self.embeddings(
input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
# undo padding
if padding_len > 0:
# unpad `sequence_output` because the calling function is expecting a length == input_ids.size(1)
sequence_output = sequence_output[:, :-padding_len]
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return LongformerBaseModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
global_attentions=encoder_outputs.global_attentions,
)
@add_start_docstrings("""Longformer Model with a `language modeling` head on top. """, LONGFORMER_START_DOCSTRING)
class LongformerForMaskedLM(LongformerPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.longformer = LongformerModel(config, add_pooling_layer=False)
self.lm_head = LongformerLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.lm_head.decoder
@add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=LongformerMaskedLMOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
global_attention_mask=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
Used to hide legacy arguments that have been deprecated.
Returns:
Examples::
>>> import torch
>>> from transformers import LongformerForMaskedLM, LongformerTokenizer
>>> model = LongformerForMaskedLM.from_pretrained('allenai/longformer-base-4096')
>>> tokenizer = LongformerTokenizer.from_pretrained('allenai/longformer-base-4096')
>>> SAMPLE_TEXT = ' '.join(['Hello world! '] * 1000) # long input document
>>> input_ids = torch.tensor(tokenizer.encode(SAMPLE_TEXT)).unsqueeze(0) # batch of size 1
>>> attention_mask = None # default is local attention everywhere, which is a good choice for MaskedLM
... # check ``LongformerModel.forward`` for more details how to set `attention_mask`
>>> outputs = model(input_ids, attention_mask=attention_mask, labels=input_ids)
>>> loss = outputs.loss
>>> prediction_logits = outputs.logits
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.longformer(
input_ids,
attention_mask=attention_mask,
global_attention_mask=global_attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return LongformerMaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Longformer Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.
""",
LONGFORMER_START_DOCSTRING,
)
class LongformerForSequenceClassification(LongformerPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.longformer = LongformerModel(config, add_pooling_layer=False)
self.classifier = LongformerClassificationHead(config)
self.init_weights()
@add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="allenai/longformer-base-4096",
output_type=LongformerSequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
global_attention_mask=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if global_attention_mask is None:
logger.info("Initializing global attention on CLS token...")
global_attention_mask = torch.zeros_like(input_ids)
# global attention on cls token
global_attention_mask[:, 0] = 1
outputs = self.longformer(
input_ids,
attention_mask=attention_mask,
global_attention_mask=global_attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return LongformerSequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class LongformerClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, hidden_states, **kwargs):
hidden_states = hidden_states[:, 0, :] # take <s> token (equiv. to [CLS])
hidden_states = self.dropout(hidden_states)
hidden_states = self.dense(hidden_states)
hidden_states = torch.tanh(hidden_states)
hidden_states = self.dropout(hidden_states)
output = self.out_proj(hidden_states)
return output
@add_start_docstrings(
"""
Longformer Model with a span classification head on top for extractive question-answering tasks like SQuAD /
TriviaQA (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
LONGFORMER_START_DOCSTRING,
)
class LongformerForQuestionAnswering(LongformerPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.longformer = LongformerModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=LongformerQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
global_attention_mask=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
sequence are not taken into account for computing the loss.
Returns:
Examples::
>>> from transformers import LongformerTokenizer, LongformerForQuestionAnswering
>>> import torch
>>> tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-large-4096-finetuned-triviaqa")
>>> model = LongformerForQuestionAnswering.from_pretrained("allenai/longformer-large-4096-finetuned-triviaqa")
>>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
>>> encoding = tokenizer(question, text, return_tensors="pt")
>>> input_ids = encoding["input_ids"]
>>> # default is local attention everywhere
>>> # the forward method will automatically set global attention on question tokens
>>> attention_mask = encoding["attention_mask"]
>>> outputs = model(input_ids, attention_mask=attention_mask)
>>> start_logits = outputs.start_logits
>>> end_logits = outputs.end_logits
>>> all_tokens = tokenizer.convert_ids_to_tokens(input_ids[0].tolist())
>>> answer_tokens = all_tokens[torch.argmax(start_logits) :torch.argmax(end_logits)+1]
>>> answer = tokenizer.decode(tokenizer.convert_tokens_to_ids(answer_tokens)) # decoding removes the leading-space artifact of the space-prefixed tokens
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if global_attention_mask is None:
if input_ids is None:
logger.warning(
"It is not possible to automatically generate the `global_attention_mask` because input_ids is None. Please make sure that it is correctly set."
)
else:
# set global attention on question tokens automatically
global_attention_mask = _compute_global_attention_mask(input_ids, self.config.sep_token_id)
outputs = self.longformer(
input_ids,
attention_mask=attention_mask,
global_attention_mask=global_attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return LongformerQuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
global_attentions=outputs.global_attentions,
)
@add_start_docstrings(
"""
Longformer Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
for Named-Entity-Recognition (NER) tasks.
""",
LONGFORMER_START_DOCSTRING,
)
class LongformerForTokenClassification(LongformerPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.longformer = LongformerModel(config, add_pooling_layer=False)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="allenai/longformer-base-4096",
output_type=LongformerTokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
global_attention_mask=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1]``.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.longformer(
input_ids,
attention_mask=attention_mask,
global_attention_mask=global_attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return LongformerTokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Longformer Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
a softmax) e.g. for RocStories/SWAG tasks.
""",
LONGFORMER_START_DOCSTRING,
)
class LongformerForMultipleChoice(LongformerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.longformer = LongformerModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.init_weights()
@add_start_docstrings_to_model_forward(
LONGFORMER_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="allenai/longformer-base-4096",
output_type=LongformerMultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
token_type_ids=None,
attention_mask=None,
global_attention_mask=None,
labels=None,
position_ids=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
:obj:`input_ids` above)
"""
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# set global attention on question tokens
if global_attention_mask is None and input_ids is not None:
logger.info("Initializing global attention on multiple choice...")
# put global attention on all tokens after `config.sep_token_id`
global_attention_mask = torch.stack(
[
_compute_global_attention_mask(input_ids[:, i], self.config.sep_token_id, before_sep_token=False)
for i in range(num_choices)
],
dim=1,
)
flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
flat_global_attention_mask = (
global_attention_mask.view(-1, global_attention_mask.size(-1))
if global_attention_mask is not None
else None
)
flat_inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.longformer(
flat_input_ids,
position_ids=flat_position_ids,
token_type_ids=flat_token_type_ids,
attention_mask=flat_attention_mask,
global_attention_mask=flat_global_attention_mask,
inputs_embeds=flat_inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return LongformerMultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
global_attentions=outputs.global_attentions,
)
| [
"torch.nn.Linear",
"torch.cat",
"torch.einsum",
"torch.bmm",
"torch.ones",
"torch.masked_fill",
"torch.nn.functional.pad",
"torch.nn.CrossEntropyLoss",
"torch.nn.LayerNorm",
"torch.tensor",
"torch.zeros_like",
"torch.zeros",
"torch.nn.Tanh",
"torch.nn.functional.dropout",
"torch.nn.functional.softmax",
"torch.cumsum",
"torch.nn.Dropout",
"torch.nn.MSELoss",
"torch.arange",
"torch.tanh",
"torch.nn.Embedding"
] | 1.0 | cgravill/transformers | 90d5ab3bfe8c20d9beccfe89fdfd62a8e5ac31e5 |
1.7 | # PyTorch
import torch
from torch.utils.data import IterableDataset, DataLoader
from donkeycar.utils import train_test_split
from donkeycar.parts.tub_v2 import Tub
from torchvision import transforms
from typing import List, Any
from donkeycar.pipeline.types import TubRecord, TubDataset
from donkeycar.pipeline.sequence import TubSequence
import pytorch_lightning as pl
def get_default_transform(for_video=False, for_inference=False):
"""
Creates a default transform to work with torchvision models
Video transform:
All pre-trained models expect input images normalized in the same way,
i.e. mini-batches of 3-channel RGB videos of shape (3 x T x H x W),
where H and W are expected to be 112, and T is the number of video frames
in a clip. The images have to be loaded into a range of [0, 1] and
then normalized using mean = [0.43216, 0.394666, 0.37645] and
std = [0.22803, 0.22145, 0.216989].
"""
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
input_size = (224, 224)
if for_video:
mean = [0.43216, 0.394666, 0.37645]
std = [0.22803, 0.22145, 0.216989]
input_size = (112, 112)
transform = transforms.Compose([
transforms.Resize(input_size),
transforms.ToTensor(),
transforms.Normalize(mean=mean, std=std)
])
return transform
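# Minimal usage sketch (illustrative, not part of the original module):
#   image_tf = get_default_transform()                # 224x224, ImageNet mean/std
#   video_tf = get_default_transform(for_video=True)  # 112x112, video-model mean/std
#   x = image_tf(pil_image)                           # -> tensor of shape [3, 224, 224]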
class TorchTubDataset(IterableDataset):
'''
Loads tub records as an iterable dataset, applying the configured image and label transforms.
'''
def __init__(self, config, records: List[TubRecord], transform=None):
"""Create a PyTorch Tub Dataset
Args:
config (object): the configuration information
records (List[TubRecord]): a list of tub records
transform (function, optional): a transform to apply to the data
"""
self.config = config
# Handle the transforms
if transform:
self.transform = transform
else:
self.transform = get_default_transform()
self.sequence = TubSequence(records)
self.pipeline = self._create_pipeline()
def _create_pipeline(self):
""" This can be overridden if more complicated pipelines are
required """
def y_transform(record: TubRecord):
angle: float = record.underlying['user/angle']
throttle: float = record.underlying['user/throttle']
predictions = torch.tensor([angle, throttle], dtype=torch.float)
# Normalize to be between [0, 1]
# angle and throttle are originally between [-1, 1]
predictions = (predictions + 1) / 2
return predictions
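# e.g. angle = -1.0, throttle = 0.5 maps to tensor([0.0, 0.75]) (illustrative values)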
def x_transform(record: TubRecord):
# Loads the result of Image.open()
img_arr = record.image(cached=True, as_nparray=False)
return self.transform(img_arr)
# Build pipeline using the transformations
pipeline = self.sequence.build_pipeline(x_transform=x_transform,
y_transform=y_transform)
return pipeline
def __iter__(self):
return iter(self.pipeline)
class TorchTubDataModule(pl.LightningDataModule):
def __init__(self, config: Any, tub_paths: List[str], transform=None):
"""Create a PyTorch Lightning Data Module to contain all data loading logic
Args:
config (object): the configuration information
tub_paths (List[str]): a list of paths to the tubs to use (minimum size of 1).
Each tub path corresponds to another training run.
transform (function, optional): a transform to apply to the data
"""
super().__init__()
self.config = config
self.tub_paths = tub_paths
# Handle the transforms
if transform:
self.transform = transform
else:
self.transform = get_default_transform()
self.tubs: List[Tub] = [Tub(tub_path, read_only=True)
for tub_path in self.tub_paths]
self.records: List[TubRecord] = []
self.setup()
def setup(self, stage=None):
"""Load all the tub data and set up the datasets.
Args:
stage ([string], optional): setup expects a string arg stage.
It is used to separate setup logic for trainer.fit
and trainer.test. Defaults to None.
"""
# Loop through all the different tubs and load all the records for each of them
for tub in self.tubs:
for underlying in tub:
record = TubRecord(self.config, tub.base_path,
underlying=underlying)
self.records.append(record)
train_records, val_records = train_test_split(
self.records, test_size=(1. - self.config.TRAIN_TEST_SPLIT))
assert len(val_records) > 0, "Not enough validation data. Add more data"
self.train_dataset = TorchTubDataset(
self.config, train_records, transform=self.transform)
self.val_dataset = TorchTubDataset(
self.config, val_records, transform=self.transform)
def train_dataloader(self):
# The number of workers is set to 0 to avoid errors on Macs and Windows
# See: https://github.com/rusty1s/pytorch_geometric/issues/366#issuecomment-498022534
return DataLoader(self.train_dataset, batch_size=self.config.BATCH_SIZE, num_workers=0)
def val_dataloader(self):
# The number of workers is set to 0 to avoid errors on Macs and Windows
# See: https://github.com/rusty1s/pytorch_geometric/issues/366#issuecomment-498022534
return DataLoader(self.val_dataset, batch_size=self.config.BATCH_SIZE, num_workers=0)
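# Minimal usage sketch (illustrative only; the config class and tub path below are placeholders,
# exposing just the attributes this module reads -- BATCH_SIZE and TRAIN_TEST_SPLIT):
#   class DemoConfig:
#       BATCH_SIZE = 32
#       TRAIN_TEST_SPLIT = 0.8
#   dm = TorchTubDataModule(DemoConfig(), ["./data/tub_1"])
#   x, y = next(iter(dm.train_dataloader()))  # x: [32, 3, 224, 224], y: [32, 2]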
| [
"torch.tensor",
"torch.utils.data.DataLoader"
] | 1.7.1 | DavidMeda/donkeycar | 1e42e40cb07d6f15c22461dc3f00182a7279cf0c |
1.4 | import torch
from torch import jit
class ComputeReturns(jit.ScriptModule):
__constants__ = ['gamma', 'T', 'B']
def __init__(self, target_actor_net, target_critic_net,
num_processes, reward_steps, batch_size, device,
gamma=0.99):
super(ComputeReturns, self).__init__()
self.gamma = gamma
self.T = reward_steps
self.B = batch_size * num_processes
self.register_buffer("targets", torch.zeros(self.T + 1, self.B, device=device))
self.target_actor_net = target_actor_net
self.target_critic_net = target_critic_net
@jit.script_method
def forward(self, obs, reward_batch, mask_batch):
last_action = self.target_actor_net(obs)
last_value = self.target_critic_net(obs, last_action)
reward_batch = (reward_batch - reward_batch.mean()) / (reward_batch.std() + 1e-6)
self.targets[-1] = last_value.squeeze(-1)
idx = self.T - 1
for i in range(self.T):
self.targets[idx - i] = reward_batch[idx - i] + \
self.gamma * self.targets[idx - i + 1] * mask_batch[idx - i]
return self.targets[:-1].detach()
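# Illustrative recursion (not part of the original file): with T = 3 rewards r_0..r_2, bootstrap
# value V from the target critic and masks m_t (0 at episode ends), the loop above computes
#   target_2 = r_2 + gamma * V        * m_2
#   target_1 = r_1 + gamma * target_2 * m_1
#   target_0 = r_0 + gamma * target_1 * m_0
# i.e. discounted n-step returns bootstrapped from the target networks.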
| [
"torch.zeros"
] | 1.4.0 | fengredrum/Batch_D3PG | b1128db2b22ce6ba94665a066b1cc401f33145b5 |
1.8 | import torch
import numpy as np
def get_ntk_n(xloader, networks, recalbn=0, train_mode=False, num_batch=-1):
device = torch.cuda.current_device()
# if recalbn > 0:
# network = recal_bn(network, xloader, recalbn, device)
# if network_2 is not None:
# network_2 = recal_bn(network_2, xloader, recalbn, device)
ntks = []
for network in networks:
if train_mode:
network.train()
else:
network.eval()
######
grads = [[] for _ in range(len(networks))]
for i, (inputs, _) in enumerate(xloader):
if num_batch > 0 and i >= num_batch: break
inputs = inputs.cuda(device=device, non_blocking=True)
for net_idx, network in enumerate(networks):
network.zero_grad()
inputs_ = inputs.clone().cuda(device=device, non_blocking=True)
logit = network(inputs_)
if isinstance(logit, tuple):
logit = logit[1] # 201 networks: return features and logits
for _idx in range(len(inputs_)):
logit[_idx:_idx+1].backward(torch.ones_like(logit[_idx:_idx+1]), retain_graph=True)
grad = []
for name, W in network.named_parameters():
if 'weight' in name and W.grad is not None:
grad.append(W.grad.view(-1).detach())
grads[net_idx].append(torch.cat(grad, -1))
torch.cuda.empty_cache()
######
grads = [torch.stack(_grads, 0) for _grads in grads]
ntks = [torch.einsum('nc,mc->nm', [_grads, _grads]) for _grads in grads]
conds = []
for ntk in ntks:
eigenvalues, _ = torch.symeig(ntk) # ascending
conds.append(np.nan_to_num((eigenvalues[-1] / eigenvalues[0]).item(), copy=True, nan=np.inf))
return conds
| [
"torch.symeig",
"torch.cat",
"torch.stack",
"torch.einsum",
"torch.cuda.current_device",
"torch.cuda.empty_cache",
"torch.ones_like"
] | 1.8.0 | 2Dooh/TF-MOENAS | edd6ec8c3f89cfbe9674873425c5056e72899edb |
1.2 | import time
from typing import Tuple, Dict, Any
import torch
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from models.forward_tacotron import ForwardTacotron
from trainer.common import Averager, TTSSession, MaskedL1, to_device, np_now
from utils.checkpoints import save_checkpoint
from utils.dataset import get_tts_datasets
from utils.decorators import ignore_exception
from utils.display import stream, simple_table, plot_mel, plot_pitch
from utils.dsp import DSP
from utils.files import parse_schedule
from utils.paths import Paths
class ForwardTrainer:
def __init__(self,
paths: Paths,
dsp: DSP,
config: Dict[str, Any]) -> None:
self.paths = paths
self.dsp = dsp
self.config = config
self.train_cfg = config['forward_tacotron']['training']
self.writer = SummaryWriter(log_dir=paths.forward_log, comment='v1')
self.l1_loss = MaskedL1()
def train(self, model: ForwardTacotron, optimizer: Optimizer) -> None:
forward_schedule = self.train_cfg['schedule']
forward_schedule = parse_schedule(forward_schedule)
for i, session_params in enumerate(forward_schedule, 1):
lr, max_step, bs = session_params
if model.get_step() < max_step:
train_set, val_set = get_tts_datasets(
path=self.paths.data, batch_size=bs, r=1, model_type='forward',
max_mel_len=self.train_cfg['max_mel_len'],
filter_attention=self.train_cfg['filter_attention'],
filter_min_alignment=self.train_cfg['min_attention_alignment'],
filter_min_sharpness=self.train_cfg['min_attention_sharpness'])
session = TTSSession(
index=i, r=1, lr=lr, max_step=max_step,
bs=bs, train_set=train_set, val_set=val_set)
self.train_session(model, optimizer, session)
def train_session(self, model: ForwardTacotron,
optimizer: Optimizer, session: TTSSession) -> None:
current_step = model.get_step()
training_steps = session.max_step - current_step
total_iters = len(session.train_set)
epochs = training_steps // total_iters + 1
simple_table([(f'Steps', str(training_steps // 1000) + 'k Steps'),
('Batch Size', session.bs),
('Learning Rate', session.lr)])
for g in optimizer.param_groups:
g['lr'] = session.lr
m_loss_avg = Averager()
dur_loss_avg = Averager()
duration_avg = Averager()
pitch_loss_avg = Averager()
device = next(model.parameters()).device # use same device as model parameters
for e in range(1, epochs + 1):
for i, batch in enumerate(session.train_set, 1):
batch = to_device(batch, device=device)
start = time.time()
model.train()
pitch_zoneout_mask = torch.rand(batch['x'].size()) > self.train_cfg['pitch_zoneout']
energy_zoneout_mask = torch.rand(batch['x'].size()) > self.train_cfg['energy_zoneout']
pitch_target = batch['pitch'].detach().clone()
energy_target = batch['energy'].detach().clone()
batch['pitch'] = batch['pitch'] * pitch_zoneout_mask.to(device).float()
batch['energy'] = batch['energy'] * energy_zoneout_mask.to(device).float()
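# Illustrative note (not in the original file): with e.g. pitch_zoneout = 0.1, roughly 10% of the
# ground-truth pitch/energy conditioning values fed to the model are zeroed each step, while the
# unmasked pitch_target/energy_target tensors are kept for the loss (a dropout-style regularizer).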
pred = model(batch)
m1_loss = self.l1_loss(pred['mel'], batch['mel'], batch['mel_len'])
m2_loss = self.l1_loss(pred['mel_post'], batch['mel'], batch['mel_len'])
dur_loss = self.l1_loss(pred['dur'].unsqueeze(1), batch['dur'].unsqueeze(1), batch['x_len'])
pitch_loss = self.l1_loss(pred['pitch'], pitch_target.unsqueeze(1), batch['x_len'])
energy_loss = self.l1_loss(pred['energy'], energy_target.unsqueeze(1), batch['x_len'])
loss = m1_loss + m2_loss \
+ self.train_cfg['dur_loss_factor'] * dur_loss \
+ self.train_cfg['pitch_loss_factor'] * pitch_loss \
+ self.train_cfg['energy_loss_factor'] * energy_loss
optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(),
self.train_cfg['clip_grad_norm'])
optimizer.step()
m_loss_avg.add(m1_loss.item() + m2_loss.item())
dur_loss_avg.add(dur_loss.item())
step = model.get_step()
k = step // 1000
duration_avg.add(time.time() - start)
pitch_loss_avg.add(pitch_loss.item())
speed = 1. / duration_avg.get()
msg = f'| Epoch: {e}/{epochs} ({i}/{total_iters}) | Mel Loss: {m_loss_avg.get():#.4} ' \
f'| Dur Loss: {dur_loss_avg.get():#.4} | Pitch Loss: {pitch_loss_avg.get():#.4} ' \
f'| {speed:#.2} steps/s | Step: {k}k | '
if step % self.train_cfg['checkpoint_every'] == 0:
save_checkpoint(model=model, optim=optimizer, config=self.config,
path=self.paths.forward_checkpoints / f'forward_step{k}k.pt')
if step % self.train_cfg['plot_every'] == 0:
self.generate_plots(model, session)
self.writer.add_scalar('Mel_Loss/train', m1_loss + m2_loss, model.get_step())
self.writer.add_scalar('Pitch_Loss/train', pitch_loss, model.get_step())
self.writer.add_scalar('Energy_Loss/train', energy_loss, model.get_step())
self.writer.add_scalar('Duration_Loss/train', dur_loss, model.get_step())
self.writer.add_scalar('Params/batch_size', session.bs, model.get_step())
self.writer.add_scalar('Params/learning_rate', session.lr, model.get_step())
stream(msg)
val_out = self.evaluate(model, session.val_set)
self.writer.add_scalar('Mel_Loss/val', val_out['mel_loss'], model.get_step())
self.writer.add_scalar('Duration_Loss/val', val_out['dur_loss'], model.get_step())
self.writer.add_scalar('Pitch_Loss/val', val_out['pitch_loss'], model.get_step())
self.writer.add_scalar('Energy_Loss/val', val_out['energy_loss'], model.get_step())
save_checkpoint(model=model, optim=optimizer, config=self.config,
path=self.paths.forward_checkpoints / 'latest_model.pt')
m_loss_avg.reset()
duration_avg.reset()
pitch_loss_avg.reset()
print(' ')
def evaluate(self, model: ForwardTacotron, val_set: DataLoader) -> Dict[str, float]:
model.eval()
m_val_loss = 0
dur_val_loss = 0
pitch_val_loss = 0
energy_val_loss = 0
device = next(model.parameters()).device
for i, batch in enumerate(val_set, 1):
batch = to_device(batch, device=device)
with torch.no_grad():
pred = model(batch)
m1_loss = self.l1_loss(pred['mel'], batch['mel'], batch['mel_len'])
m2_loss = self.l1_loss(pred['mel_post'], batch['mel'], batch['mel_len'])
dur_loss = self.l1_loss(pred['dur'].unsqueeze(1), batch['dur'].unsqueeze(1), batch['x_len'])
pitch_loss = self.l1_loss(pred['pitch'], batch['pitch'].unsqueeze(1), batch['x_len'])
energy_loss = self.l1_loss(pred['energy'], batch['energy'].unsqueeze(1), batch['x_len'])
pitch_val_loss += pitch_loss
energy_val_loss += energy_loss
m_val_loss += m1_loss.item() + m2_loss.item()
dur_val_loss += dur_loss.item()
return {
'mel_loss': m_val_loss / len(val_set),
'dur_loss': dur_val_loss / len(val_set),
'pitch_loss': pitch_val_loss / len(val_set),
'energy_loss': energy_val_loss / len(val_set)
}
@ignore_exception
def generate_plots(self, model: ForwardTacotron, session: TTSSession) -> None:
model.eval()
device = next(model.parameters()).device
batch = session.val_sample
batch = to_device(batch, device=device)
pred = model(batch)
m1_hat = np_now(pred['mel'])[0, :600, :]
m2_hat = np_now(pred['mel_post'])[0, :600, :]
m_target = np_now(batch['mel'])[0, :600, :]
m1_hat_fig = plot_mel(m1_hat)
m2_hat_fig = plot_mel(m2_hat)
m_target_fig = plot_mel(m_target)
pitch_fig = plot_pitch(np_now(batch['pitch'][0]))
pitch_gta_fig = plot_pitch(np_now(pred['pitch'].squeeze()[0]))
energy_fig = plot_pitch(np_now(batch['energy'][0]))
energy_gta_fig = plot_pitch(np_now(pred['energy'].squeeze()[0]))
self.writer.add_figure('Pitch/target', pitch_fig, model.step)
self.writer.add_figure('Pitch/ground_truth_aligned', pitch_gta_fig, model.step)
self.writer.add_figure('Energy/target', energy_fig, model.step)
self.writer.add_figure('Energy/ground_truth_aligned', energy_gta_fig, model.step)
self.writer.add_figure('Ground_Truth_Aligned/target', m_target_fig, model.step)
self.writer.add_figure('Ground_Truth_Aligned/linear', m1_hat_fig, model.step)
self.writer.add_figure('Ground_Truth_Aligned/postnet', m2_hat_fig, model.step)
m2_hat_wav = self.dsp.griffinlim(m2_hat)
target_wav = self.dsp.griffinlim(m_target)
self.writer.add_audio(
tag='Ground_Truth_Aligned/target_wav', snd_tensor=target_wav,
global_step=model.step, sample_rate=self.dsp.sample_rate)
self.writer.add_audio(
tag='Ground_Truth_Aligned/postnet_wav', snd_tensor=m2_hat_wav,
global_step=model.step, sample_rate=self.dsp.sample_rate)
gen = model.generate(batch['x'][0:1, :batch['x_len'][0]])
m1_hat_fig = plot_mel(gen['mel'])
m2_hat_fig = plot_mel(gen['mel_post'])
pitch_gen_fig = plot_pitch(np_now(gen['pitch'].squeeze()))
energy_gen_fig = plot_pitch(np_now(gen['energy'].squeeze()))
self.writer.add_figure('Pitch/generated', pitch_gen_fig, model.step)
self.writer.add_figure('Energy/generated', energy_gen_fig, model.step)
self.writer.add_figure('Generated/target', m_target_fig, model.step)
self.writer.add_figure('Generated/linear', m1_hat_fig, model.step)
self.writer.add_figure('Generated/postnet', m2_hat_fig, model.step)
m2_hat_wav = self.dsp.griffinlim(m2_hat)
self.writer.add_audio(
tag='Generated/target_wav', snd_tensor=target_wav,
global_step=model.step, sample_rate=self.dsp.sample_rate)
self.writer.add_audio(
tag='Generated/postnet_wav', snd_tensor=m2_hat_wav,
global_step=model.step, sample_rate=self.dsp.sample_rate)
| [
"torch.no_grad",
"torch.utils.tensorboard.SummaryWriter"
] | 1.2.0 | anh/ForwardTacotron | a58d9244844b4512f5655e154f08f934760c88b3 |
2.9 | import json as _json
import multiprocessing as _mp
import os as _os
from collections.abc import Callable
from functools import partial as _partial
from os import sep as _sep
import numpy as _np
import torch as _torch
import torch.utils.data as _data
from torch.utils.data import DataLoader as _DataLoader, Dataset as _Dataset
from torch.utils.data._utils.collate import default_collate as _default_collate
import dad_torch.data.datautils as _du
import dad_torch.utils as _etutils
from dad_torch.utils.logger import *
from .datautils import UnPaddedDDPSampler
def _job(total, func, i, f):
print(f"Working on: [ {i}/{total} ]", end='\r')
return func(f)
def multiRun(nproc: int, data_list: list, func: Callable) -> list:
_files = []
for ix, file in enumerate(data_list, 1):
_files.append([ix, file])
with _mp.Pool(processes=nproc) as pool:
return list(
pool.starmap(_partial(_job, len(_files), func), _files)
)
def safe_collate(batch):
r"""Safely select batches/skip dataset_cls(errors in file loading."""
return _default_collate([b for b in batch if b])
def num_workers(args, loader_args, distributed=False):
if distributed:
return (loader_args['num_workers'] + args['num_gpus'] - 1) // args['num_gpus']
return loader_args['num_workers']
def batch_size(args, loader_args, distributed=False):
if distributed:
loader_args['batch_size'] = loader_args['batch_size'] // args['num_gpus']
return loader_args['batch_size']
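# Illustrative note (not part of the original module): under DDP each process gets its share of
# the global settings, e.g. with num_workers=6, batch_size=256 and num_gpus=4:
#   num_workers(...) -> (6 + 4 - 1) // 4 = 2 loader workers per process
#   batch_size(...)  -> 256 // 4 = 64 samples per process (the global batch stays 256)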
def _seed_worker(worker_id):
seed = (int(_torch.initial_seed()) + worker_id) % (2 ** 32 - 1)
_np.random.seed(seed)
def _et_data_job_func(mode, file, dataspec, args, dataset_cls):
test_dataset = dataset_cls(mode=mode, **args)
test_dataset.add(files=[file], verbose=False, **dataspec)
return test_dataset
def _et_data_job(mode, arg, dspec, cls, total, func, verbose, i, file):
if verbose:
print(f"Working on: [ {i} / {total} ]", end='\r')
return func(mode, file, dspec, arg, cls)
class DTDataHandle:
def __init__(self, args=None, dataloader_args=None, **kw):
self.args = _etutils.FrozenDict(args)
self.dataloader_args = _etutils.FrozenDict(dataloader_args)
def get_dataset(self, handle_key, files, dataspec: dict, dataset_cls=None):
dataset = dataset_cls(mode=handle_key, limit=self.args['load_limit'], **self.args)
dataset.add(files=files, verbose=self.args['verbose'], **dataspec)
return dataset
def get_train_dataset(self, split_file, dataspec: dict, dataset_cls=None):
r"""Load the train data from the current fold/split."""
if dataset_cls is None or self.dataloader_args.get('train', {}).get('dataset'):
    return self.dataloader_args.get('train', {}).get('dataset')
with open(dataspec['split_dir'] + _sep + split_file) as file:
split = _json.loads(file.read())
train_dataset = self.get_dataset('train', split.get('train', []),
dataspec, dataset_cls=dataset_cls)
return train_dataset
def get_validation_dataset(self, split_file, dataspec: dict, dataset_cls=None):
r"""Load the validation data from the current fold/split."""
if dataset_cls is None or self.dataloader_args.get('validation', {}).get('dataset'):
    return self.dataloader_args.get('validation', {}).get('dataset')
with open(dataspec['split_dir'] + _sep + split_file) as file:
split = _json.loads(file.read())
val_dataset = self.get_dataset('validation', split.get('validation', []),
dataspec, dataset_cls=dataset_cls)
if val_dataset and len(val_dataset) > 0:
return val_dataset
def get_test_dataset(self, split_file, dataspec: dict, dataset_cls=None):
if dataset_cls is None or self.dataloader_args.get('test', {}).get('dataset'):
return self.dataloader_args.get('test', {}).get('dataset')
with open(dataspec['split_dir'] + _sep + split_file) as file:
_files = _json.loads(file.read()).get('test', [])[:self.args['load_limit']]
if self.args['load_sparse'] and len(_files) > 1:
datasets = DTDataHandle.multi_load('test', _files, dataspec, self.args, dataset_cls)
success(f'\n{len(datasets)} sparse dataset loaded.', self.args['verbose'])
else:
datasets = self.get_dataset('test', _files, dataspec, dataset_cls=dataset_cls)
if len(datasets) > 0 and sum([len(t) for t in datasets if t]) > 0:
return datasets
def get_loader(self, handle_key='', distributed=False, use_unpadded_sampler=False, **kw):
args = {**self.args}
args['distributed'] = distributed
args['use_unpadded_sampler'] = use_unpadded_sampler
args.update(self.dataloader_args.get(handle_key, {}))
args.update(**kw)
if args.get('dataset') is None:
return None
loader_args = {
'dataset': None,
'batch_size': 1,
'sampler': None,
'shuffle': False,
'batch_sampler': None,
'num_workers': 0,
'pin_memory': False,
'drop_last': False,
'timeout': 0,
'worker_init_fn': _seed_worker if args.get('seed_all') else None
}
for k in loader_args.keys():
loader_args[k] = args.get(k, loader_args.get(k))
if args['distributed']:
sampler_args = {
'num_replicas': args.get('replicas'),
'rank': args.get('rank'),
'shuffle': args.get('shuffle'),
'seed': args.get('seed')
}
if loader_args.get('sampler') is None:
loader_args['shuffle'] = False # Shuffle is mutually exclusive with sampler
if args['use_unpadded_sampler']:
loader_args['sampler'] = UnPaddedDDPSampler(loader_args['dataset'], **sampler_args)
else:
loader_args['sampler'] = _data.distributed.DistributedSampler(loader_args['dataset'],
**sampler_args)
loader_args['num_workers'] = num_workers(args, loader_args, True)
loader_args['batch_size'] = batch_size(args, loader_args, True)
return _DataLoader(collate_fn=safe_collate, **loader_args)
def create_splits(self, dataspec, out_dir):
if _du.should_create_splits_(out_dir, dataspec, self.args):
_du.default_data_splitter_(dspec=dataspec, args=self.args)
info(f"{len(_os.listdir(dataspec['split_dir']))} split(s) created in '{dataspec['split_dir']}' directory.",
self.args['verbose'])
else:
splits_len = len(_os.listdir(dataspec['split_dir']))
info(f"{splits_len} split(s) loaded from '{dataspec['split_dir']}' directory.",
self.args['verbose'] and splits_len > 0)
def init_dataspec_(self, dataspec: dict):
for k in dataspec:
if '_dir' in k:
path = _os.path.join(self.args['dataset_dir'], dataspec[k])
path = path.replace(f"{_sep}{_sep}", _sep)
if path.endswith(_sep):
path = path[:-1]
dataspec[k] = path
@staticmethod
def multi_load(mode, files, dataspec, args, dataset_cls, func=_et_data_job_func) -> list:
r"""Note: Only works with dad_torch's default args from dad_torch import args"""
_files = []
for ix, f in enumerate(files, 1):
_files.append([ix, f])
nw = min(num_workers(args, args, args['use_ddp']), len(_files))
with _mp.Pool(processes=max(1, nw)) as pool:
dataset_list = list(
pool.starmap(
_partial(_et_data_job, mode, args, dataspec, dataset_cls, len(_files), func, args['verbose']),
_files)
)
return [_d for _d in dataset_list if len(_d) >= 1]
class DTDataset(_Dataset):
def __init__(self, mode='init', limit=None, **kw):
self.mode = mode
self.limit = limit
self.indices = []
self.data = {}
self.args = _etutils.FrozenDict(kw)
self.dataspecs = _etutils.FrozenDict({})
def load_index(self, dataset_name, file):
r"""
Logic to load the indices of a single file.
Sometimes one image maps to multiple indices, e.g. U-Net-style training where several patches are extracted per image.
"""
self.indices.append([dataset_name, file])
def _load_indices(self, dataspec_name, files, verbose=True):
r"""
Load the indices/names of the files in order to prepare minibatches.
Only up to `limit` files are loaded so that debugging is easier (default is unlimited; set via the -lim/--load-lim argument).
"""
_files = files[:self.limit]
if len(_files) > 1:
dataset_objs = DTDataHandle.multi_load(
self.mode, _files, self.dataspecs[dataspec_name], self.args, self.__class__
)
self.gather(dataset_objs)
else:
for f in _files:
self.load_index(dataspec_name, f)
success(f'\n{dataspec_name}, {self.mode}, {len(self)} indices Loaded.', verbose)
def gather(self, dataset_objs):
for d in dataset_objs:
attributes = vars(d)
for k, v in attributes.items():
if isinstance(v, _etutils.FrozenDict):
continue
if isinstance(v, list):
self.__getattribute__(f"{k}").extend(v)
elif isinstance(attributes[f"{k}"], dict):
self.__getattribute__(f"{k}").update(**v)
elif isinstance(attributes[f"{k}"], set):
self.__getattribute__(f"{k}").union(v)
def __getitem__(self, index):
r"""
Logic to load one file and send it to the model. The mini-batch generation is handled by the DataLoader;
here we just need to deal with a single file.
"""
raise NotImplementedError('Must be implemented by child class.')
def __len__(self):
return len(self.indices)
def transforms(self, **kw):
return None
def add(self, files, verbose=True, **kw):
r""" An extra layer for added flexibility."""
self.dataspecs[kw['name']] = kw
self._load_indices(dataspec_name=kw['name'], files=files, verbose=verbose)
| [
"torch.initial_seed",
"torch.utils.data._utils.collate.default_collate",
"torch.utils.data.DataLoader",
"torch.utils.data.distributed.DistributedSampler"
] | 2.9.2 | trendscenter/easytorch | 0faf6c7f09701c8f73ed4061214ca724c83d82aa |
from torch.nn.modules.loss import CrossEntropyLoss
from dataset import WakeWordDataset
from model import CNNNetwork
from torch.utils.data import DataLoader
from tqdm import tqdm
from typing import Tuple
import argparse
import torch
import torchaudio.transforms as T
import torch.nn as nn
import torch.optim as optim
def binary_accuracy(preds: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    # fraction of predictions that exactly match the labels
    return (preds == y).sum() / len(y)
def train(model: nn.Module, dataloader: DataLoader, criterion, optimizer: optim.Optimizer, device: str, epoch: int) -> None:
losses = []
preds = []
labels = []
pbar = tqdm(dataloader, desc="TRAIN")
for (input, label) in pbar:
input, label = input.to(device), label.to(device)
pred = model(input)
loss = criterion(pred, label)
optimizer.zero_grad()
loss.backward()
optimizer.step()
#acc = binary_accuracy(pred, label).item()
losses.append(loss.item())
preds.append(pred.detach().cpu())
labels.append(label.detach().cpu())
# print("Epoch: {}, Iteration: {}/{}, loss:{}".format(epoch,
# idx, len(train_loader), loss))
pbar.set_postfix(loss=loss)
avg_train_loss = sum(losses)/len(losses)
#acc = binary_accuracy(torch.cat(preds), torch.cat(labels))
#print('avg train loss:', avg_train_loss, "avg train acc", acc)
#report = classification_report(torch.cat(labels), torch.cat(preds))
#print(report)
#return acc
@torch.no_grad()
def test(model: nn.Module, dataloader: DataLoader, criterion, optimizer: optim.Optimizer, device: str, epoch: int) -> None:
losses = []
preds = []
labels = []
pbar = tqdm(dataloader, desc="TEST")
for (input, label) in pbar:
input, label = input.to(device), label.to(device)
pred = model(input)
loss = criterion(pred, label)
#acc = binary_accuracy(pred, label).item()
losses.append(loss.item())
preds.append(pred.detach().cpu())
labels.append(label.detach().cpu())
# print("Epoch: {}, Iteration: {}/{}, loss:{}".format(epoch,
# idx, len(train_loader), loss))
pbar.set_postfix(loss=loss)
avg_train_loss = sum(losses)/len(losses)
#acc = binary_accuracy(torch.cat(preds), torch.cat(labels))
#print('avg train loss:', avg_train_loss, "avg train acc", acc)
#report = classification_report(torch.cat(labels), torch.cat(preds))
#print(report)
#return acc
def main(args) -> None:
if (not args.no_cuda) and torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
print(f"Using {device}")
mel_spectrogram = T.MelSpectrogram(
sample_rate=args.sample_rate,
n_fft=1024,
hop_length=512,
n_mels=64
)
train_dataset = WakeWordDataset(
args.train_data_json, audio_transformation=mel_spectrogram, device=device)
train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size)
test_dataset = WakeWordDataset(
args.test_data_json, audio_transformation=mel_spectrogram, device=device)
test_dataloader = DataLoader(test_dataset, batch_size=args.batch_size)
model_cnn = CNNNetwork().to(device)
criterion = CrossEntropyLoss().to(device)
optimizer = optim.AdamW(model_cnn.parameters(), lr=args.lr)
for epoch in tqdm(range(args.epochs), desc="Epoch"):
# train model
train(model_cnn, train_dataloader, criterion,
optimizer, device, epoch)
test(model_cnn, test_dataloader, criterion,
optimizer, device, epoch)
# save model
torch.save(model_cnn.state_dict(), args.save_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="""
Script to train the Wake Word Detection and save the model
""",
formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument('--sample_rate', type=int, default=8000,
help='the number of samples collected per second, default at 8000')
parser.add_argument('--epochs', type=int, default=100,
help='epoch size')
parser.add_argument('--batch_size', type=int, default=32,
help='size of batch')
parser.add_argument('--lr', type=float, default=1e-3, help="learning rate")
parser.add_argument('--save_path', type=str, default=None,
help='path to save the train model')
parser.add_argument('--train_data_json', type=str, default=None, required=True,
help='path to train dat json file')
parser.add_argument('--test_data_json', type=str, default=None, required=True,
help='path to train dat json file')
parser.add_argument('--no_cuda', action='store_true', default=False,
help='disables CUDA training')
args = parser.parse_args()
main(args)
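# Example invocation (illustrative; the script name and file paths are placeholders, the flags
# match the argparse definitions above):
#   python train.py --train_data_json data/train.json --test_data_json data/test.json \
#       --save_path checkpoints/wakeword.pt --epochs 50 --batch_size 32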
| [
"torch.nn.modules.loss.CrossEntropyLoss",
"torch.no_grad",
"torch.cuda.is_available",
"torch.utils.data.DataLoader"
] | 1.5.0 | wimausberlin/voice-assistant-system | 2d92fa0989109279b3959bc6d5ea32777d50cbac |
1.1 | import torch
from torch import multiprocessing, cuda
from torch.utils.data import DataLoader
import torch.nn.functional as F
from torch.backends import cudnn
import numpy as np
import importlib
import os
import voc12.dataloader
from misc import torchutils, imutils
cudnn.enabled = True
def _work(process_id, model, dataset, args):
databin = dataset[process_id]
n_gpus = torch.cuda.device_count()
data_loader = DataLoader(databin, shuffle=False, num_workers=args.num_workers // n_gpus, pin_memory=False)
#data_loader = DataLoader(databin, shuffle=False, num_workers=0, pin_memory=False)
with torch.no_grad(), cuda.device(process_id):
model.cuda()
for iter, pack in enumerate(data_loader):
img_name = pack['name'][0]
label = pack['label'][0]
size = pack['size']
strided_size = imutils.get_strided_size(size, 4)
strided_up_size = imutils.get_strided_up_size(size, 16)
outputs = [model(img[0].cuda(non_blocking=True))
for img in pack['img']]
strided_cam = torch.sum(torch.stack(
[F.interpolate(torch.unsqueeze(o, 0), strided_size, mode='bilinear', align_corners=False)[0] for o
in outputs]), 0)
highres_cam = [F.interpolate(torch.unsqueeze(o, 1), strided_up_size,
mode='bilinear', align_corners=False) for o in outputs]
highres_cam = torch.sum(torch.stack(highres_cam, 0), 0)[:, 0, :size[0], :size[1]]
valid_cat = torch.nonzero(label)[:, 0]
strided_cam = strided_cam[valid_cat]
try:
strided_cam /= F.adaptive_max_pool2d(strided_cam, (1, 1)) + 1e-5
highres_cam = highres_cam[valid_cat]
highres_cam /= F.adaptive_max_pool2d(highres_cam, (1, 1)) + 1e-5
# save cams
np.save(os.path.join(args.cam_out_dir, img_name.replace('/', '_') + '.npy'),
{"keys": valid_cat, "cam": strided_cam.cpu(), "high_res": highres_cam.cpu().numpy()})
if process_id == n_gpus - 1 and iter % (len(databin) // 20) == 0:
print("%d " % ((5*iter+1)//(len(databin) // 20)), end='')
except Exception:
    print(f"Skipping {img_name}: failed to normalize or save CAM")
continue
def run(args):
model = getattr(importlib.import_module(args.cam_network), 'CAM')()
model.load_state_dict(torch.load(args.cam_weights_name + '.pth'), strict=True)
model.eval()
n_gpus = torch.cuda.device_count()
dataset = voc12.dataloader.VOC12ClassificationDatasetMSF(args.train_list,
voc12_root=args.voc12_root, scales=args.cam_scales)
dataset = torchutils.split_dataset(dataset, n_gpus)
print('[ ', end='')
multiprocessing.spawn(_work, nprocs=n_gpus, args=(model, dataset, args), join=True)
#_work(0, model, dataset, args)
print(']')
torch.cuda.empty_cache()
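# Illustrative note (not in the original file): each saved .npy holds a Python dict with
# "keys" (foreground class indices), "cam" (strided low-resolution CAM) and "high_res"
# (full-resolution CAM), so a downstream consumer would typically read it back with
#   cam_dict = np.load(path, allow_pickle=True).item()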
| [
"torch.nonzero",
"torch.stack",
"torch.nn.functional.adaptive_max_pool2d",
"torch.no_grad",
"torch.multiprocessing.spawn",
"torch.cuda.device",
"torch.cuda.device_count",
"torch.unsqueeze",
"torch.cuda.empty_cache",
"torch.utils.data.DataLoader",
"torch.load"
] | 1.1.0 | saramsv/CCT | 27b4fd838a174a3c0fca582aa163e5bd426b055a |
1.10 | import torch
from torchvision.models.segmentation import fcn_resnet50
from torchvision.utils import draw_segmentation_masks
from PIL import Image
import io
from torchvision.transforms import transforms
from torchvision.utils import save_image
import torchvision.transforms.functional as F
def get_model():
model = fcn_resnet50(pretrained=True, progress=True)
model = model.eval()
return model
def transform_image(image_bytes):
my_transforms = transforms.Compose([transforms.Resize(255),
transforms.ToTensor(),
transforms.Normalize(
[0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
image = Image.open(io.BytesIO(image_bytes))
image.convert("RGB")
image.save("static/org_pil.jpg")
print("Saved Original image successfully")
return my_transforms(image).unsqueeze(0)
sem_classes = [
'__background__', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',
'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',
'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'
]
sem_class_to_idx = {cls: idx for (idx, cls) in enumerate(sem_classes)}
def get_prediction(image_bytes):
try:
print('Entered here')
tensor = transform_image(image_bytes=image_bytes)
model = get_model()
output = model(tensor)['out']
print(output.shape)
except Exception:
return 0, 'Failed'
print(output.shape, output.min().item(), output.max().item())
normalized_masks = torch.nn.functional.softmax(output, dim=1)
num_classes = normalized_masks.shape[1]
class_dim = 1
all_classes_masks = normalized_masks.argmax(class_dim) == torch.arange(num_classes)[:, None, None, None]
all_classes_masks = all_classes_masks.swapaxes(0, 1)
dogs_with_masks = [
draw_segmentation_masks(img, masks=mask, alpha=.6)
for img, mask in zip(tensor.to(dtype=torch.uint8), all_classes_masks)
]
img = dogs_with_masks[0]
img = img.detach()
img = F.to_pil_image(img)
img.save("static/masked.jpg")
print("Saved masked image successfully")
return None
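# Illustrative call sketch (added; the image path below is a placeholder, not from this repo):
#   with open("some_image.jpg", "rb") as f:
#       get_prediction(f.read())   # writes static/org_pil.jpg and static/masked.jpg if static/ exists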
| [
"torch.arange",
"torch.nn.functional.softmax"
] | 1.10.2 | ummadiviany/image-segmenter | 906457a16765f7e1995a8dca7b222ba3abd13a3f |
1.2 | # Author: Robin Ru @ University of Oxford
# This is an implementation of zero-cost estimators based on:
# https://github.com/BayesWatch/nas-without-training (Jacov)
# and https://github.com/gahaalt/SNIP-pruning (SNIP)
import numpy as np
import torch
import logging
import gc
from naslib.predictors.predictor import Predictor
from naslib.predictors.utils.build_nets import get_cell_based_tiny_net
from naslib.utils.utils import get_project_root, get_train_val_loaders
from naslib.predictors.utils.build_nets.build_darts_net import NetworkCIFAR
from naslib.search_spaces.darts.conversions import convert_compact_to_genotype
logger = logging.getLogger(__name__)
def get_batch_jacobian(net, x, target):
net.zero_grad()
x.requires_grad_(True)
_, y = net(x)
y.backward(torch.ones_like(y))
jacob = x.grad.detach()
return jacob, target.detach()
def eval_score(jacob, labels=None):
corrs = np.corrcoef(jacob)
v, _ = np.linalg.eig(corrs)
k = 1e-5
return -np.sum(np.log(v + k) + 1. / (v + k))
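# Illustrative sketch (added, not in the original file): query() below ties these two
# helpers together for the 'jacov' estimator roughly as
#   jacobs, _ = get_batch_jacobian(network, x, target)          # (N, C, H, W)
#   jacobs = jacobs.reshape(jacobs.size(0), -1).cpu().numpy()   # (N, C*H*W)
#   score = eval_score(jacobs)                                   # higher is better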
class ZeroCostEstimators(Predictor):
def __init__(self, config, batch_size = 64, method_type='jacov'):
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
self.batch_size = batch_size
self.method_type = method_type
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
config.data = "{}/data".format(get_project_root())
self.config = config
if method_type == 'jacov':
self.num_classes = 1
else:
num_classes_dic = {'cifar10': 10, 'cifar100': 100, 'ImageNet16-120': 120}
self.num_classes = num_classes_dic[self.config.dataset]
def pre_process(self):
self.train_loader, _, _, _, _ = get_train_val_loaders(self.config, mode='train')
def query(self, xtest, info=None):
test_set_scores = []
count = 0
for test_arch in xtest:
count += 1
logger.info('zero cost: {} of {}'.format(count, len(xtest)))
if 'nasbench201' in self.config.search_space:
ops_to_nb201 = {'AvgPool1x1': 'avg_pool_3x3', 'ReLUConvBN1x1': 'nor_conv_1x1',
'ReLUConvBN3x3': 'nor_conv_3x3', 'Identity': 'skip_connect', 'Zero': 'none',}
# convert the naslib representation to nasbench201
cell = test_arch.edges[2, 3].op
edge_op_dict = {(i, j): ops_to_nb201[cell.edges[i, j]['op'].get_op_name] for i, j in cell.edges}
op_edge_list = ['{}~{}'.format(edge_op_dict[(i, j)], i - 1) for i, j in sorted(edge_op_dict, key=lambda x: x[1])]
arch_str = '|{}|+|{}|{}|+|{}|{}|{}|'.format(*op_edge_list)
arch_config = {'name': 'infer.tiny', 'C': 16, 'N':5, 'arch_str': arch_str,
'num_classes': self.num_classes}
network = get_cell_based_tiny_net(arch_config) # create the network from configuration
elif 'darts' in self.config.search_space:
test_genotype = convert_compact_to_genotype(test_arch.compact)
arch_config = {'name': 'darts', 'C': 32, 'layers': 8, 'genotype': test_genotype,
'num_classes': self.num_classes, 'auxiliary': False}
network = NetworkCIFAR(arch_config)
data_iterator = iter(self.train_loader)
x, target = next(data_iterator)
x, target = x.to(self.device), target.to(self.device)
network = network.to(self.device)
if self.method_type == 'jacov':
jacobs, labels = get_batch_jacobian(network, x, target)
# print('done get jacobs')
jacobs = jacobs.reshape(jacobs.size(0), -1).cpu().numpy()
try:
score = eval_score(jacobs, labels)
except Exception as e:
print(e)
score = -10e8
elif self.method_type == 'snip':
criterion = torch.nn.CrossEntropyLoss()
network.zero_grad()
_, y = network(x)
loss = criterion(y, target)
loss.backward()
grads = [p.grad.detach().clone().abs() for p in network.parameters() if p.grad is not None]
with torch.no_grad():
saliences = [(grad * weight).view(-1).abs() for weight, grad in zip(network.parameters(), grads)]
score = torch.sum(torch.cat(saliences)).cpu().numpy()
if hasattr(self, 'ss_type') and self.ss_type == 'darts':
score = -score
# print(f'nclass={self.num_classes}, scores={score}')
test_set_scores.append(score)
network, data_iterator, x, target, jacobs, labels = None, None, None, None, None, None
torch.cuda.empty_cache()
gc.collect()
return np.array(test_set_scores)
| [
"torch.cat",
"torch.no_grad",
"torch.cuda.empty_cache",
"torch.cuda.is_available",
"torch.ones_like",
"torch.nn.CrossEntropyLoss"
] | 1.2.0 | shenyann/NASLib | 6fad875f21e41bb9c91647bbd0620aa6e6dc8c7f |
0.4 | import torch
import random
def bce_loss(input, target):
"""
Numerically stable version of the binary cross-entropy loss function.
As per https://github.com/pytorch/pytorch/issues/751
See the TensorFlow docs for a derivation of this formula:
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Input:
- input: PyTorch Tensor of shape (N, ) giving scores.
- target: PyTorch Tensor of shape (N,) containing 0 and 1 giving targets.
Output:
- A PyTorch Tensor containing the mean BCE loss over the minibatch of
input data.
"""
neg_abs = -input.abs()
loss = input.clamp(min=0) - input * target + (1 + neg_abs.exp()).log()
return loss.mean()
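# Sanity-check sketch (added, not in the original file): this stable form computes the same
# quantity as binary_cross_entropy_with_logits with mean reduction, e.g.
#   scores = torch.randn(16); targets = torch.randint(0, 2, (16,)).float()
#   torch.allclose(bce_loss(scores, targets),
#                  torch.nn.functional.binary_cross_entropy_with_logits(scores, targets))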
def gan_g_loss(scores_fake):
"""
Input:
- scores_fake: Tensor of shape (N,) containing scores for fake samples
Output:
- loss: Tensor of shape (,) giving GAN generator loss
"""
y_fake = torch.ones_like(scores_fake) * random.uniform(0.7, 1.2)
return bce_loss(scores_fake, y_fake)
def gan_d_loss(scores_real, scores_fake):
"""
Input:
- scores_real: Tensor of shape (N,) giving scores for real samples
- scores_fake: Tensor of shape (N,) giving scores for fake samples
Output:
- loss: Tensor of shape (,) giving GAN discriminator loss
"""
y_real = torch.ones_like(scores_real) * random.uniform(0.7, 1.2)
y_fake = torch.zeros_like(scores_fake) * random.uniform(0, 0.3)
loss_real = bce_loss(scores_real, y_real)
loss_fake = bce_loss(scores_fake, y_fake)
return loss_real + loss_fake
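# Usage sketch (added for illustration; `discriminator`, `real_traj` and `fake_traj` are
# hypothetical names, not defined in this file). Real labels are smoothed into [0.7, 1.2],
# fake labels stay near 0:
#   scores_real = discriminator(real_traj)   # shape (N,)
#   scores_fake = discriminator(fake_traj)
#   d_loss = gan_d_loss(scores_real, scores_fake)
#   g_loss = gan_g_loss(scores_fake)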
def l2_loss(pred_traj, pred_traj_gt, loss_mask, random=0, mode='average'):
"""
Input:
- pred_traj: Tensor of shape (seq_len, batch, 2). Predicted trajectory.
- pred_traj_gt: Tensor of shape (seq_len, batch, 2). Ground truth
predictions.
- loss_mask: Tensor of shape (batch, seq_len)
- mode: Can be one of sum, average, raw
Output:
- loss: l2 loss depending on mode
"""
seq_len, batch, _ = pred_traj.size()
# print("l2 loss")
# print(loss_mask.unsqueeze(dim=0).transpose_(1,2).shape)
# print(pred_traj_gt.permute(1, 0, 2).shape)
# print(pred_traj.permute(1, 0, 2).shape)
print("loss_mask:", loss_mask.shape)
print(loss_mask.unsqueeze(dim=2).shape)
print("pred_seq: ", pred_traj.shape)
loss = (loss_mask.unsqueeze(dim=2) *
(pred_traj_gt.permute(1, 0, 2) - pred_traj.permute(1, 0, 2))**2)
if mode == 'sum':
return torch.sum(loss)
elif mode == 'average':
return torch.sum(loss) / torch.numel(loss_mask.data)
elif mode == 'raw':
return loss.sum(dim=2).sum(dim=1)
def displacement_error(pred_traj, pred_traj_gt, consider_ped=None, mode='sum'):
"""
Input:
- pred_traj: Tensor of shape (seq_len, batch, 2). Predicted trajectory.
- pred_traj_gt: Tensor of shape (seq_len, batch, 2). Ground truth
predictions.
- consider_ped: Tensor of shape (batch)
- mode: Can be one of sum, raw
Output:
- loss: gives the Euclidean displacement error
"""
seq_len, _, _ = pred_traj.size()
loss = pred_traj_gt.permute(1, 0, 2) - pred_traj.permute(1, 0, 2)
loss = loss**2
# print("consider_ped: ", consider_ped)
if consider_ped is not None:
loss = torch.sqrt(loss.sum(dim=2)).sum(dim=1) * consider_ped
else:
loss = torch.sqrt(loss.sum(dim=2)).sum(dim=1)
if mode == 'sum':
return torch.sum(loss)
elif mode == 'raw':
return loss
def final_displacement_error(
pred_pos, pred_pos_gt, consider_ped=None, mode='sum'
):
"""
Input:
- pred_pos: Tensor of shape (batch, 2). Predicted last pos.
- pred_pos_gt: Tensor of shape (batch, 2). Ground truth
last pos
- consider_ped: Tensor of shape (batch)
Output:
- loss: gives the Euclidean displacement error
"""
loss = pred_pos_gt - pred_pos
loss = loss**2
if consider_ped is not None:
loss = torch.sqrt(loss.sum(dim=1)) * consider_ped
else:
loss = torch.sqrt(loss.sum(dim=1))
if mode == 'raw':
return loss
else:
return torch.sum(loss)
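# Minimal usage sketch (added; the tensors below are random placeholders, not data):
#   pred = torch.rand(12, 8, 2)   # (seq_len, batch, 2)
#   gt = torch.rand(12, 8, 2)
#   ade = displacement_error(pred, gt, mode='sum') / (12 * 8)
#   fde = final_displacement_error(pred[-1], gt[-1], mode='sum') / 8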
| [
"torch.zeros_like",
"torch.numel",
"torch.ones_like",
"torch.sum"
] | 0.4.0 | sapan-ostic/deep_prediction | e4709e4a66477755e6afe39849597ae1e3e969b5 |
1.0 | import argparse
import torch
import onmt
import onmt.model_builder
import onmt.inputters as inputters
import onmt.opts
from onmt.utils.misc import use_gpu
from onmt.utils.logging import init_logger, logger
parser = argparse.ArgumentParser(description='translate.py')
parser.add_argument('-model', required=True,
help='Path to model .pt file')
parser.add_argument('-output_dir', default='.',
help="""Path to output the embeddings""")
parser.add_argument('-gpu', type=int, default=-1,
help="Device to run on")
def write_embeddings(filename, dict, embeddings):
with open(filename, 'wb') as file:
for i in range(min(len(embeddings), len(dict.itos))):
str = dict.itos[i].encode("utf-8")
for j in range(len(embeddings[0])):
str = str + (" %5f" % (embeddings[i][j])).encode("utf-8")
file.write(str + b"\n")
def main():
dummy_parser = argparse.ArgumentParser(description='train.py')
onmt.opts.model_opts(dummy_parser)
dummy_opt = dummy_parser.parse_known_args([])[0]
opt = parser.parse_args()
opt.cuda = opt.gpu > -1
if opt.cuda:
torch.cuda.set_device(opt.gpu)
# Add in default model arguments, possibly added since training.
checkpoint = torch.load(opt.model,
map_location=lambda storage, loc: storage)
model_opt = checkpoint['opt']
vocab = checkpoint['vocab']
if inputters.old_style_vocab(vocab):
fields = onmt.inputters.load_old_vocab(vocab)
else:
fields = vocab
src_dict = fields['src'][0][1].base_field.vocab # assumes src is text
tgt_dict = fields['tgt'][0][1].base_field.vocab
model_opt = checkpoint['opt']
for arg in dummy_opt.__dict__:
if arg not in model_opt:
model_opt.__dict__[arg] = dummy_opt.__dict__[arg]
model = onmt.model_builder.build_base_model(
model_opt, fields, use_gpu(opt), checkpoint)
encoder = model.encoder
decoder = model.decoder
encoder_embeddings = encoder.embeddings.word_lut.weight.data.tolist()
decoder_embeddings = decoder.embeddings.word_lut.weight.data.tolist()
logger.info("Writing source embeddings")
write_embeddings(opt.output_dir + "/src_embeddings.txt", src_dict,
encoder_embeddings)
logger.info("Writing target embeddings")
write_embeddings(opt.output_dir + "/tgt_embeddings.txt", tgt_dict,
decoder_embeddings)
logger.info('... done.')
logger.info('Converting model...')
if __name__ == "__main__":
init_logger('extract_embeddings.log')
main()
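# Example invocation (added for illustration; the script and checkpoint names are placeholders):
#   python extract_embeddings.py -model demo-model_step_10000.pt -output_dir embeddings -gpu 0
# This writes src_embeddings.txt and tgt_embeddings.txt into the chosen output directory.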
| [
"torch.cuda.set_device",
"torch.load"
] | 1.0 | cocoxu/OpenNMT-py | 820ad912dda0b5cbe49c53762374deb6bedd1299 |
0.6 | """LVQMLN example using all four dimensions of the Iris dataset."""
import argparse
import prototorch as pt
import pytorch_lightning as pl
import torch
class Backbone(torch.nn.Module):
def __init__(self, input_size=4, hidden_size=10, latent_size=2):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.latent_size = latent_size
self.dense1 = torch.nn.Linear(self.input_size, self.hidden_size)
self.dense2 = torch.nn.Linear(self.hidden_size, self.latent_size)
self.activation = torch.nn.Sigmoid()
def forward(self, x):
x = self.activation(self.dense1(x))
out = self.activation(self.dense2(x))
return out
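# Quick shape check (added note, not part of the original example):
#   backbone = Backbone(input_size=4, hidden_size=10, latent_size=2)
#   backbone(torch.rand(5, 4)).shape   # -> torch.Size([5, 2]), values in (0, 1) from the sigmoid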
if __name__ == "__main__":
# Command-line arguments
parser = argparse.ArgumentParser()
parser = pl.Trainer.add_argparse_args(parser)
args = parser.parse_args()
# Dataset
train_ds = pt.datasets.Iris()
# Reproducibility
pl.utilities.seed.seed_everything(seed=42)
# Dataloaders
train_loader = torch.utils.data.DataLoader(train_ds, batch_size=150)
# Hyperparameters
hparams = dict(
distribution=[3, 4, 5],
proto_lr=0.001,
bb_lr=0.001,
)
# Initialize the backbone
backbone = Backbone()
# Initialize the model
model = pt.models.LVQMLN(
hparams,
prototypes_initializer=pt.initializers.SSCI(
train_ds,
transform=backbone,
),
backbone=backbone,
)
# Model summary
print(model)
# Callbacks
vis = pt.models.VisSiameseGLVQ2D(
data=train_ds,
map_protos=False,
border=0.1,
resolution=500,
axis_off=True,
)
pruning = pt.models.PruneLoserPrototypes(
threshold=0.01,
idle_epochs=20,
prune_quota_per_epoch=2,
frequency=10,
verbose=True,
)
# Setup trainer
trainer = pl.Trainer.from_argparse_args(
args,
callbacks=[
vis,
pruning,
],
)
# Training loop
trainer.fit(model, train_loader)
| [
"torch.nn.Linear",
"torch.utils.data.DataLoader",
"torch.nn.Sigmoid"
] | 0.6.0 | dmoebius-dm/prototorch_models | 71602bf38a09148eab13d98c9f89589b345ac570 |
1.6 | """
This script was made by soeque1 on 24/07/20.
It implements the training code for the model.
"""
import logging
from argparse import ArgumentParser, Namespace
from logging import getLogger
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger
from transformers.optimization import AdamW
from src.core.build_data import Config
from src.data import UbuntuDataLoader, UbuntuDataSet, collate
from src.metric import bleuS_4
from src.model.net import ReCoSA
from src.utils.prepare import build
logger = getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class RecoSAPL(pl.LightningModule):
def __init__(self, config: dict, len_train_dataloader: int = None) -> None:
super().__init__()
self.config = config
self._device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.model = ReCoSA(config=self.config.model, _device=self._device)
self.pred = []
self.target = []
self.len_train_dataloader = len_train_dataloader
self.lr_scale = 1.0
def forward(self, x):
return self.model.forward()
def inference(self, ctx: torch.Tensor, response: torch.Tensor) -> torch.Tensor:
return self.model.inference(ctx, response)
def generate(self, ctx: torch.Tensor) -> str:
return self.model.generate(ctx)
def training_step(self, batch, batch_idx):
ctx, response, target = batch
pred = self.model(ctx, response)
if batch_idx % 1000 == 0:
logger.info(self.model.tokenizer.decode(torch.argmax(pred[0], dim=0)))
logger.info(self.model.tokenizer.decode(response[0]))
loss = F.cross_entropy(
pred, target, ignore_index=self.model.tokenizer.pad_token_id
)
ppl = torch.exp(loss)
self.log(
"lr",
self.lr_scale * self.config.trainer.lr,
on_step=True,
on_epoch=False,
prog_bar=False,
logger=True,
)
self.log_dict(
{"tr_loss": loss, "tr_ppl": ppl},
on_step=False,
on_epoch=True,
prog_bar=True,
sync_dist=True,
logger=True,
)
return loss
def validation_step(self, batch, batch_idx):
ctx, response, target = batch
pred = self.model(ctx, response)
loss = F.cross_entropy(
pred, target, ignore_index=self.model.tokenizer.pad_token_id
)
ppl = torch.exp(loss)
if batch_idx % 100 == 0:
pred_sen = torch.argmax(pred, dim=1)
pred_sentence = [
self.model.tokenizer.decode(i)
.split(self.model.tokenizer.eos_token)[0]
.split()
+ [self.model.tokenizer.eos_token]
for i in pred_sen
]
target_sentence = [
self.model.tokenizer.decode(i)
.split(self.model.tokenizer.eos_token)[0]
.split()
+ [self.model.tokenizer.eos_token]
for i in target
]
logger.info("idx: " + str(batch_idx))
logger.info("pred: " + " ".join(pred_sentence[0]))
logger.info("target: " + " ".join(target_sentence[0]))
self.log_dict(
{"val_loss": loss, "val_ppl": ppl},
on_step=False,
on_epoch=True,
prog_bar=True,
sync_dist=True,
logger=True,
)
return loss
def configure_optimizers(self):
# https://github.com/huggingface/transformers/blob/a75c64d80c76c3dc71f735d9197a4a601847e0cd/examples/contrib/run_openai_gpt.py
param_optimizer = list(self.model.named_parameters())
no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [
p for n, p in param_optimizer if not any(nd in n for nd in no_decay)
],
"weight_decay": self.config.trainer.weight_decay,
},
{
"params": [
p for n, p in param_optimizer if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
return AdamW(optimizer_grouped_parameters, lr=self.config.trainer.lr, eps=1e-8)
def optimizer_step(
self,
current_epoch,
batch_nb,
optimizer,
optimizer_idx,
second_order_closure=None,
on_tpu=False,
using_native_amp=False,
using_lbfgs=False,
):
# warm up lr
if self.trainer.global_step < float(self.config.trainer.warmup_steps):
self.lr_scale = min(
1.0,
float(self.trainer.global_step + 1)
/ float(self.config.trainer.warmup_steps),
)
for pg in optimizer.param_groups:
pg["lr"] = self.lr_scale * self.config.trainer.lr
else:
self.lr_scale = 1.0
# update params
optimizer.step()
optimizer.zero_grad()
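# Worked example (added note): with warmup_steps=4000, global_step=399 gives
# lr_scale = min(1.0, 400 / 4000) = 0.1, so the effective lr is 0.1 * config.trainer.lr;
# from step 3999 on the scale stays at 1.0 and the configured lr is used unchanged.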
def test_step(self, batch, batch_idx):
ctx, _, target = batch
pred, pred_sen = self.model.generate(ctx, max_seq=ctx.shape[2])
loss = F.cross_entropy(
pred, target, ignore_index=self.model.tokenizer.pad_token_id
)
ppl = torch.exp(loss)
pred_sentence = [
self.model.tokenizer.decode(i)
.split(self.model.tokenizer.eos_token)[0]
.split()
+ [self.model.tokenizer.eos_token]
for i in pred_sen
]
target_sentence = [
self.model.tokenizer.decode(i)
.split(self.model.tokenizer.eos_token)[0]
.split()
+ [self.model.tokenizer.eos_token]
for i in target
]
target_sentence_list = [[i] for i in target_sentence]
self.pred.extend(pred_sentence)
self.target.extend(target_sentence_list)
bleu_score = bleuS_4(pred_sentence, target_sentence_list).to(ppl.device)
if batch_idx % 10 == 0:
logger.info("idx: " + str(batch_idx))
ctx_decoded = [
self.model.tokenizer.decode(i).split(self.model.tokenizer.eos_token)[0]
+ self.model.tokenizer.eos_token
for i in ctx[0]
]
logger.info("idx: " + " ".join(ctx_decoded))
logger.info("pred: " + " ".join(pred_sentence[0]))
logger.info("target: " + " ".join(target_sentence[0]))
self.log_dict(
{"val_loss_gen": loss, "val_ppl_gen": ppl, "val_bleu_gen": bleu_score},
on_step=False,
on_epoch=True,
prog_bar=True,
sync_dist=True,
logger=True,
)
return loss
def main(
config_data_file: str,
config_model_file: str,
config_trainer_file: str,
version: str,
) -> None:
# TODO: to be removed
_ = build({"data_config": config_data_file, "version": version})
cfg = Config()
cfg.add_dataset(config_data_file)
cfg.add_model(config_model_file)
cfg.add_trainer(config_trainer_file)
train_data = UbuntuDataSet(
cfg.dataset.root + cfg.dataset.target,
cfg.dataset.raw.train,
cfg.model.max_seq,
cfg.dataset.target,
cfg.model.max_turns,
)
val_data = UbuntuDataSet(
cfg.dataset.root + cfg.dataset.target,
cfg.dataset.raw.val,
cfg.model.max_seq,
cfg.dataset.target,
cfg.model.max_turns,
)
train_dataloader = UbuntuDataLoader(
train_data,
batch_size=cfg.model.batch_size,
shuffle=True,
num_workers=8,
collate_fn=collate,
)
val_dataloader = UbuntuDataLoader(
val_data,
batch_size=cfg.model.batch_size,
shuffle=False,
num_workers=8,
collate_fn=collate,
)
logger = TensorBoardLogger(save_dir="exp", name=cfg.dataset.target, version=version)
prefix = f"exp/{cfg.dataset.target}/{version}/"
suffix = "{epoch:02d}-{val_loss:.4f}"
filepath = prefix + suffix
checkpoint_callback = ModelCheckpoint(
filepath=filepath,
save_top_k=1,
monitor="val_loss",
save_weights_only=True,
verbose=True,
)
model = RecoSAPL(cfg, len(train_data))
trainer = pl.Trainer(
**cfg.trainer.pl,
logger=logger,
checkpoint_callback=checkpoint_callback,
)
trainer.fit(model, train_dataloader, val_dataloader)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument(
"--config_data_file", default="./conf/dataset/ubuntu.yml", type=str
)
parser.add_argument(
"--config_model_file", default="./conf/model/ReCoSa.yml", type=str
)
parser.add_argument(
"--config_trainer_file", default="./conf/trainer/ReCoSa.yml", type=str
)
parser.add_argument("--version", default="v0.0.1", type=str)
args = parser.parse_args()
main(
args.config_data_file,
args.config_model_file,
args.config_trainer_file,
args.version,
)
| [
"torch.nn.functional.cross_entropy",
"torch.cuda.is_available",
"torch.exp",
"torch.argmax"
] | 1.6.0 | HephaestusProject/pytorch-ReCoSa | eca171582a9021845009ade542cd99c2e5ddf701 |
1.5 | """
Text generation using a character LSTM, specifically we want to
generate new names as inspiration for those having a baby :)
Although this is aimed at name generation, the code is general:
you can feed in any large text file (Shakespeare text, etc.)
and it will generate text in the same style.
Programmed by Aladdin Persson <aladdin.persson at hotmail dot com>
* 2020-05-09 Initial coding
"""
import torch
import torch.nn as nn
import string
import random
import sys
import unidecode
from torch.utils.tensorboard import SummaryWriter
# Device configuration
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Get characters from string.printable
all_characters = string.printable
n_characters = len(all_characters)
# Read large text file (Note can be any text file: not limited to just names)
file = unidecode.unidecode(open("data/names.txt").read())
class RNN(nn.Module):
def __init__(self, input_size, hidden_size, num_layers, output_size):
super(RNN, self).__init__()
self.hidden_size = hidden_size
self.num_layers = num_layers
self.embed = nn.Embedding(input_size, hidden_size)
self.lstm = nn.LSTM(hidden_size, hidden_size, num_layers, batch_first=True)
self.fc = nn.Linear(hidden_size, output_size)
def forward(self, x, hidden, cell):
out = self.embed(x)
out, (hidden, cell) = self.lstm(out.unsqueeze(1), (hidden, cell))
out = self.fc(out.reshape(out.shape[0], -1))
return out, (hidden, cell)
def init_hidden(self, batch_size):
hidden = torch.zeros(self.num_layers, batch_size, self.hidden_size).to(device)
cell = torch.zeros(self.num_layers, batch_size, self.hidden_size).to(device)
return hidden, cell
class Generator:
def __init__(self):
self.chunk_len = 250
self.num_epochs = 5000
self.batch_size = 1
self.print_every = 50
self.hidden_size = 256
self.num_layers = 2
self.lr = 0.003
def char_tensor(self, string):
tensor = torch.zeros(len(string)).long()
for c in range(len(string)):
tensor[c] = all_characters.index(string[c])
return tensor
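# Example (added note): indices come from string.printable, so with the default ordering
# char_tensor("Ab") -> tensor([36, 11]) ('A' is the 37th printable character, 'b' the 12th).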
def get_random_batch(self):
start_idx = random.randint(0, len(file) - self.chunk_len)
end_idx = start_idx + self.chunk_len + 1
text_str = file[start_idx:end_idx]
text_input = torch.zeros(self.batch_size, self.chunk_len)
text_target = torch.zeros(self.batch_size, self.chunk_len)
for i in range(self.batch_size):
text_input[i, :] = self.char_tensor(text_str[:-1])
text_target[i, :] = self.char_tensor(text_str[1:])
return text_input.long(), text_target.long()
def generate(self, initial_str="A", predict_len=100, temperature=0.85):
hidden, cell = self.rnn.init_hidden(batch_size=self.batch_size)
initial_input = self.char_tensor(initial_str)
predicted = initial_str
for p in range(len(initial_str) - 1):
_, (hidden, cell) = self.rnn(
initial_input[p].view(1).to(device), hidden, cell
)
last_char = initial_input[-1]
for p in range(predict_len):
output, (hidden, cell) = self.rnn(
last_char.view(1).to(device), hidden, cell
)
output_dist = output.data.view(-1).div(temperature).exp()
top_char = torch.multinomial(output_dist, 1)[0]
predicted_char = all_characters[top_char]
predicted += predicted_char
last_char = self.char_tensor(predicted_char)
return predicted
# input_size, hidden_size, num_layers, output_size
def train(self):
self.rnn = RNN(
n_characters, self.hidden_size, self.num_layers, n_characters
).to(device)
optimizer = torch.optim.Adam(self.rnn.parameters(), lr=self.lr)
criterion = nn.CrossEntropyLoss()
writer = SummaryWriter(f"runs/names0") # for tensorboard
print("=> Starting training")
for epoch in range(1, self.num_epochs + 1):
inp, target = self.get_random_batch()
hidden, cell = self.rnn.init_hidden(batch_size=self.batch_size)
self.rnn.zero_grad()
loss = 0
inp = inp.to(device)
target = target.to(device)
for c in range(self.chunk_len):
output, (hidden, cell) = self.rnn(inp[:, c], hidden, cell)
loss += criterion(output, target[:, c])
loss.backward()
optimizer.step()
loss = loss.item() / self.chunk_len
if epoch % self.print_every == 0:
print(f"Loss: {loss}")
print(self.generate())
writer.add_scalar("Training loss", loss, global_step=epoch)
gennames = Generator()
gennames.train()
| [
"torch.nn.Linear",
"torch.zeros",
"torch.nn.LSTM",
"torch.nn.CrossEntropyLoss",
"torch.multinomial",
"torch.cuda.is_available",
"torch.nn.Embedding"
] | 1.5.0 | xuyannus/Machine-Learning-Collection | 425d196e9477dbdbbd7cc0d19d29297571746ab5 |
1.8 | from collections import OrderedDict
from typing import List
import torch
from torch import nn
from torchvision.models import densenet121, densenet161, densenet169, densenet201, DenseNet
from .common import EncoderModule, _take, make_n_channel_input
__all__ = ["DenseNetEncoder", "DenseNet121Encoder", "DenseNet169Encoder", "DenseNet161Encoder", "DenseNet201Encoder"]
class DenseNetEncoder(EncoderModule):
def __init__(
self, densenet: DenseNet, strides: List[int], channels: List[int], layers: List[int], first_avg_pool=False
):
if layers is None:
layers = [1, 2, 3, 4]
super().__init__(channels, strides, layers)
def except_pool(block: nn.Module):
del block.pool
return block
self.layer0 = nn.Sequential(
OrderedDict(
[
("conv0", densenet.features.conv0),
("bn0", densenet.features.norm0),
("act0", densenet.features.relu0),
]
)
)
self.avg_pool = nn.AvgPool2d(kernel_size=2, stride=2)
self.pool0 = self.avg_pool if first_avg_pool else densenet.features.pool0
self.layer1 = nn.Sequential(densenet.features.denseblock1, except_pool(densenet.features.transition1))
self.layer2 = nn.Sequential(densenet.features.denseblock2, except_pool(densenet.features.transition2))
self.layer3 = nn.Sequential(densenet.features.denseblock3, except_pool(densenet.features.transition3))
self.layer4 = nn.Sequential(densenet.features.denseblock4)
self._output_strides = _take(strides, layers)
self._output_filters = _take(channels, layers)
@property
def encoder_layers(self):
return [self.layer0, self.layer1, self.layer2, self.layer3, self.layer4]
@property
@torch.jit.unused
def strides(self):
return self._output_strides
@property
@torch.jit.unused
def channels(self):
return self._output_filters
def forward(self, x):
output_features = []
for layer in self.encoder_layers:
output = layer(x)
output_features.append(output)
if layer == self.layer0:
# First maxpool operator is not a part of layer0 because we want the layer0 output to have stride of 2
output = self.pool0(output)
else:
output = self.avg_pool(output)
x = output
# Return only features that were requested
return _take(output_features, self._layers)
@torch.jit.unused
def change_input_channels(self, input_channels: int, mode="auto", **kwargs):
self.layer0.conv0 = make_n_channel_input(self.layer0.conv0, input_channels, mode=mode, **kwargs)
return self
class DenseNet121Encoder(DenseNetEncoder):
def __init__(self, layers=None, pretrained=True, memory_efficient=False, first_avg_pool=False):
densenet = densenet121(pretrained=pretrained, memory_efficient=memory_efficient)
strides = [2, 4, 8, 16, 32]
channels = [64, 128, 256, 512, 1024]
super().__init__(densenet, strides, channels, layers, first_avg_pool)
class DenseNet161Encoder(DenseNetEncoder):
def __init__(self, layers=None, pretrained=True, memory_efficient=False, first_avg_pool=False):
densenet = densenet161(pretrained=pretrained, memory_efficient=memory_efficient)
strides = [2, 4, 8, 16, 32]
channels = [96, 192, 384, 1056, 2208]
super().__init__(densenet, strides, channels, layers, first_avg_pool)
class DenseNet169Encoder(DenseNetEncoder):
def __init__(self, layers=None, pretrained=True, memory_efficient=False, first_avg_pool=False):
densenet = densenet169(pretrained=pretrained, memory_efficient=memory_efficient)
strides = [2, 4, 8, 16, 32]
channels = [64, 128, 256, 640, 1664]
super().__init__(densenet, strides, channels, layers, first_avg_pool)
class DenseNet201Encoder(DenseNetEncoder):
def __init__(self, layers=None, pretrained=True, memory_efficient=False, first_avg_pool=False):
densenet = densenet201(pretrained=pretrained, memory_efficient=memory_efficient)
strides = [2, 4, 8, 16, 32]
channels = [64, 128, 256, 896, 1920]
super().__init__(densenet, strides, channels, layers, first_avg_pool)
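# Minimal usage sketch (added for illustration, not part of the original module):
#   encoder = DenseNet121Encoder(pretrained=False)
#   features = encoder(torch.rand(2, 3, 224, 224))
#   [f.shape[1] for f in features]   # -> [128, 256, 512, 1024] with the default layers=[1, 2, 3, 4]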
| [
"torch.nn.Sequential",
"torch.nn.AvgPool2d"
] | 1.8.1 | azkalot1/pytorch-toolbelt | 9d7544fa32a6c6588f9f8c4525ba702700ac01cc |
1.4 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
import logging
import os
from typing import Callable, Tuple
import torch
from torch import Tensor, device, dtype, nn
from torch.nn import CrossEntropyLoss
from torch.nn import functional as F
from .activations import get_activation
from .configuration_utils import PretrainedConfig
from .file_utils import (
DUMMY_INPUTS,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
WEIGHTS_NAME,
cached_path,
hf_bucket_url,
is_remote_url,
)
logger = logging.getLogger(__name__)
try:
from torch.nn import Identity
except ImportError:
# Older PyTorch compatibility
class Identity(nn.Module):
r"""A placeholder identity operator that is argument-insensitive.
"""
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, input):
return input
class ModuleUtilsMixin:
"""
A few utilities for torch.nn.Modules, to be used as a mixin.
"""
def num_parameters(self, only_trainable: bool = False) -> int:
"""
Get number of (optionally, trainable) parameters in the module.
"""
params = filter(lambda x: x.requires_grad, self.parameters()) if only_trainable else self.parameters()
return sum(p.numel() for p in params)
@staticmethod
def _hook_rss_memory_pre_forward(module, *args, **kwargs):
try:
import psutil
except (ImportError):
raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.")
process = psutil.Process(os.getpid())
mem = process.memory_info()
module.mem_rss_pre_forward = mem.rss
return None
@staticmethod
def _hook_rss_memory_post_forward(module, *args, **kwargs):
try:
import psutil
except (ImportError):
raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.")
process = psutil.Process(os.getpid())
mem = process.memory_info()
module.mem_rss_post_forward = mem.rss
mem_rss_diff = module.mem_rss_post_forward - module.mem_rss_pre_forward
module.mem_rss_diff = mem_rss_diff + (module.mem_rss_diff if hasattr(module, "mem_rss_diff") else 0)
return None
def add_memory_hooks(self):
""" Add a memory hook before and after each sub-module forward pass to record increase in memory consumption.
Increase in memory consumption is stored in a `mem_rss_diff` attribute for each module and can be reset to zero with `model.reset_memory_hooks_state()`
"""
for module in self.modules():
module.register_forward_pre_hook(self._hook_rss_memory_pre_forward)
module.register_forward_hook(self._hook_rss_memory_post_forward)
self.reset_memory_hooks_state()
def reset_memory_hooks_state(self):
for module in self.modules():
module.mem_rss_diff = 0
module.mem_rss_post_forward = 0
module.mem_rss_pre_forward = 0
@property
def device(self) -> device:
return next(self.parameters()).device
@property
def dtype(self) -> dtype:
return next(self.parameters()).dtype
def invert_attention_mask(self, encoder_attention_mask: Tensor) -> Tensor:
"""type: torch.Tensor -> torch.Tensor"""
if encoder_attention_mask.dim() == 3:
encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e9
return encoder_extended_attention_mask
def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: tuple, device: device):
"""Makes broadcastable attention mask and causal mask so that future and maked tokens are ignored.
Arguments:
attention_mask: torch.Tensor with 1 indicating tokens to ATTEND to
input_shape: tuple, shape of input_ids
device: torch.Device, usually self.device
Returns:
torch.Tensor with dtype of attention_mask.dtype
"""
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder:
batch_size, seq_length = input_shape
seq_ids = torch.arange(seq_length, device=device)
causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
# causal and attention masks must have same type with pytorch version < 1.3
causal_mask = causal_mask.to(attention_mask.dtype)
extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
else:
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(
"Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
input_shape, attention_mask.shape
)
)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
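# Worked example (added note): for a non-decoder model, a padding mask [[1, 1, 0]]
# becomes a (1, 1, 1, 3) tensor [[[[0.0, 0.0, -10000.0]]]], which is added to the raw
# attention scores so the padded position is effectively removed by the softmax.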
def get_head_mask(self, head_mask, num_hidden_layers):
"""
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
attention_probs has shape bsz x n_heads x N x N
Arguments:
head_mask: torch.Tensor or None: has shape [num_heads] or [num_hidden_layers x num_heads]
num_hidden_layers: int
Returns:
Tensor of shape shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
or list with [None] for each layer
"""
if head_mask is not None:
head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers)
else:
head_mask = [None] * num_hidden_layers
return head_mask
def _convert_head_mask_to_5d(self, head_mask, num_hidden_layers):
"""-> [num_hidden_layers x batch x num_heads x seq_length x seq_length]"""
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer
assert head_mask.dim() == 5, f"head_mask.dim != 5, instead {head_mask.dim()}"
head_mask = head_mask.to(dtype=self.dtype)  # switch to float if needed + fp16 compatibility
return head_mask
class PreTrainedModel(nn.Module, ModuleUtilsMixin):
r""" Base class for all models.
:class:`~transformers.PreTrainedModel` takes care of storing the configuration of the models and handles methods for loading/downloading/saving models
as well as a few methods common to all models to (i) resize the input embeddings and (ii) prune heads in the self-attention heads.
Class attributes (overridden by derived classes):
- ``config_class``: a class derived from :class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture.
- ``pretrained_model_archive_map``: a python ``dict`` of with `short-cut-names` (string) as keys and `url` (string) of associated pretrained weights as values.
- ``load_tf_weights``: a python ``method`` for loading a TensorFlow checkpoint in a PyTorch model, taking as arguments:
- ``model``: an instance of the relevant subclass of :class:`~transformers.PreTrainedModel`,
- ``config``: an instance of the relevant subclass of :class:`~transformers.PretrainedConfig`,
- ``path``: a path (string) to the TensorFlow checkpoint.
- ``base_model_prefix``: a string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model.
"""
config_class = None
pretrained_model_archive_map = {}
base_model_prefix = ""
@property
def dummy_inputs(self):
""" Dummy inputs to do a forward pass in the network.
Returns:
torch.Tensor with dummy inputs
"""
return {"input_ids": torch.tensor(DUMMY_INPUTS)}
def __init__(self, config, *inputs, **kwargs):
super().__init__()
if not isinstance(config, PretrainedConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. "
"To create a model from a pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
)
)
# Save config in model
self.config = config
@property
def base_model(self):
return getattr(self, self.base_model_prefix, self)
def get_input_embeddings(self):
"""
Returns the model's input embeddings.
Returns:
:obj:`nn.Module`:
A torch module mapping vocabulary to hidden states.
"""
base_model = getattr(self, self.base_model_prefix, self)
if base_model is not self:
return base_model.get_input_embeddings()
else:
raise NotImplementedError
def set_input_embeddings(self, value):
"""
Set model's input embeddings
Args:
value (:obj:`nn.Module`):
A module mapping vocabulary to hidden states.
"""
base_model = getattr(self, self.base_model_prefix, self)
if base_model is not self:
base_model.set_input_embeddings(value)
else:
raise NotImplementedError
def get_output_embeddings(self):
"""
Returns the model's output embeddings.
Returns:
:obj:`nn.Module`:
A torch module mapping hidden states to vocabulary.
"""
return None # Overwrite for models with output embeddings
def tie_weights(self):
"""
Tie the weights between the input embeddings and the output embeddings.
If the `torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning
the weights instead.
"""
output_embeddings = self.get_output_embeddings()
if output_embeddings is not None:
self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())
def _tie_or_clone_weights(self, output_embeddings, input_embeddings):
""" Tie or clone module weights depending of whether we are using TorchScript or not
"""
if self.config.torchscript:
output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone())
else:
output_embeddings.weight = input_embeddings.weight
if getattr(output_embeddings, "bias", None) is not None:
output_embeddings.bias.data = torch.nn.functional.pad(
output_embeddings.bias.data,
(0, output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0],),
"constant",
0,
)
if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"):
output_embeddings.out_features = input_embeddings.num_embeddings
def resize_token_embeddings(self, new_num_tokens=None):
""" Resize input token embeddings matrix of the model if new_num_tokens != config.vocab_size.
Take care of tying weights embeddings afterwards if the model class has a `tie_weights()` method.
Arguments:
new_num_tokens: (`optional`) int:
New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end.
If not provided or None: does nothing and just returns a pointer to the input tokens ``torch.nn.Embeddings`` Module of the model.
Return: ``torch.nn.Embeddings``
Pointer to the input tokens Embeddings Module of the model
"""
base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed
model_embeds = base_model._resize_token_embeddings(new_num_tokens)
if new_num_tokens is None:
return model_embeds
# Update base model and current model config
self.config.vocab_size = new_num_tokens
base_model.vocab_size = new_num_tokens
# Tie weights again if needed
self.tie_weights()
return model_embeds
def _resize_token_embeddings(self, new_num_tokens):
old_embeddings = self.get_input_embeddings()
new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
self.set_input_embeddings(new_embeddings)
return self.get_input_embeddings()
def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None):
""" Build a resized Embedding Module from a provided token Embedding Module.
Increasing the size will add newly initialized vectors at the end
Reducing the size will remove vectors from the end
Args:
new_num_tokens: (`optional`) int
New number of tokens in the embedding matrix.
Increasing the size will add newly initialized vectors at the end
Reducing the size will remove vectors from the end
If not provided or None: return the provided token Embedding Module.
Return: ``torch.nn.Embeddings``
Pointer to the resized Embedding Module or the old Embedding Module if new_num_tokens is None
"""
if new_num_tokens is None:
return old_embeddings
old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
if old_num_tokens == new_num_tokens:
return old_embeddings
# Build new embeddings
new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim)
new_embeddings.to(old_embeddings.weight.device)
# initialize all new embeddings (in particular added tokens)
self._init_weights(new_embeddings)
# Copy token embeddings from the previous weights
num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
new_embeddings.weight.data[:num_tokens_to_copy, :] = old_embeddings.weight.data[:num_tokens_to_copy, :]
return new_embeddings
def init_weights(self):
""" Initialize and prunes weights if needed. """
# Initialize weights
self.apply(self._init_weights)
# Prune heads if needed
if self.config.pruned_heads:
self.prune_heads(self.config.pruned_heads)
# Tie weights if needed
self.tie_weights()
def prune_heads(self, heads_to_prune):
""" Prunes heads of the base model.
Arguments:
heads_to_prune: dict with keys being selected layer indices (`int`) and associated values being the list of heads to prune in said layer (list of `int`).
E.g. {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.
"""
# save new sets of pruned heads as union of previously stored pruned heads and newly pruned heads
for layer, heads in heads_to_prune.items():
union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads)
self.config.pruned_heads[layer] = list(union_heads) # Unfortunately we have to store it as list for JSON
self.base_model._prune_heads(heads_to_prune)
def save_pretrained(self, save_directory):
""" Save a model and its configuration file to a directory, so that it
can be re-loaded using the `:func:`~transformers.PreTrainedModel.from_pretrained`` class method.
Arguments:
save_directory: directory to which to save.
"""
assert os.path.isdir(
save_directory
), "Saving path should be a directory where the model and configuration can be saved"
# Only save the model itself if we are using distributed training
model_to_save = self.module if hasattr(self, "module") else self
# Attach architecture to the config
model_to_save.config.architectures = [model_to_save.__class__.__name__]
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(save_directory, WEIGHTS_NAME)
if getattr(self.config, "xla_device", False):
import torch_xla.core.xla_model as xm
if xm.is_master_ordinal():
# Save configuration file
model_to_save.config.save_pretrained(save_directory)
# xm.save takes care of saving only from master
xm.save(model_to_save.state_dict(), output_model_file)
else:
model_to_save.config.save_pretrained(save_directory)
torch.save(model_to_save.state_dict(), output_model_file)
logger.info("Model weights saved in {}".format(output_model_file))
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""Instantiate a pretrained pytorch model from a pre-trained model configuration.
The model is set in evaluation mode by default using ``model.eval()`` (Dropout modules are deactivated)
To train the model, you should first set it back in training mode with ``model.train()``
The warning ``Weights from XXX not initialized from pretrained model`` means that the weights of XXX do not come pre-trained with the rest of the model.
It is up to you to train those weights with a downstream fine-tuning task.
The warning ``Weights from XXX not used in YYY`` means that the layer XXX is not used by YYY, therefore those weights are discarded.
Parameters:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
- None if you are both providing the configuration and state dictionary (resp. with keyword arguments ``config`` and ``state_dict``)
model_args: (`optional`) Sequence of positional arguments:
All remaining positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) one of:
- an instance of a class derived from :class:`~transformers.PretrainedConfig`, or
- a string valid as input to :func:`~transformers.PretrainedConfig.from_pretrained()`
Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
- the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.
- the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
an optional state dictionary for the model to use instead of a state dictionary loaded from the saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
Force to (re-)download the model weights and configuration files and override the cached versions if they exists.
resume_download: (`optional`) boolean, default False:
Do not delete incompletely received files. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments:
Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.
Examples::
# For example purposes. Not runnable.
model = BertModel.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = BertModel.from_pretrained('./test/saved_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
model = BertModel.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json')
model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
state_dict = kwargs.pop("state_dict", None)
cache_dir = kwargs.pop("cache_dir", None)
from_tf = kwargs.pop("from_tf", False)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
output_loading_info = kwargs.pop("output_loading_info", False)
local_files_only = kwargs.pop("local_files_only", False)
# Load config if we don't provide a configuration
if not isinstance(config, PretrainedConfig):
config_path = config if config is not None else pretrained_model_name_or_path
config, model_kwargs = cls.config_class.from_pretrained(
config_path,
*model_args,
cache_dir=cache_dir,
return_unused_kwargs=True,
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
local_files_only=local_files_only,
**kwargs,
)
else:
model_kwargs = kwargs
# Load model
if pretrained_model_name_or_path is not None:
if pretrained_model_name_or_path in cls.pretrained_model_archive_map:
archive_file = cls.pretrained_model_archive_map[pretrained_model_name_or_path]
elif os.path.isdir(pretrained_model_name_or_path):
if from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")):
# Load from a TF 1.0 checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")
elif from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):
# Load from a TF 2.0 checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
# Load from a PyTorch checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
else:
raise EnvironmentError(
"Error no file named {} found in directory {} or `from_tf` set to False".format(
[WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME + ".index"],
pretrained_model_name_or_path,
)
)
elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
archive_file = pretrained_model_name_or_path
elif os.path.isfile(pretrained_model_name_or_path + ".index"):
assert (
from_tf
), "We found a TensorFlow checkpoint at {}, please set from_tf to True to load from this checkpoint".format(
pretrained_model_name_or_path + ".index"
)
archive_file = pretrained_model_name_or_path + ".index"
else:
archive_file = hf_bucket_url(
pretrained_model_name_or_path, postfix=(TF2_WEIGHTS_NAME if from_tf else WEIGHTS_NAME),
)
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(
archive_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
)
except EnvironmentError:
if pretrained_model_name_or_path in cls.pretrained_model_archive_map:
msg = "Couldn't reach server at '{}' to download pretrained weights.".format(archive_file)
else:
msg = (
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url to model weight files named one of {} but "
"couldn't find any such file at this path or url.".format(
pretrained_model_name_or_path,
", ".join(cls.pretrained_model_archive_map.keys()),
archive_file,
[WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME],
)
)
raise EnvironmentError(msg)
if resolved_archive_file == archive_file:
logger.info("loading weights file {}".format(archive_file))
else:
logger.info("loading weights file {} from cache at {}".format(archive_file, resolved_archive_file))
else:
resolved_archive_file = None
# Instantiate model.
model = cls(config, *model_args, **model_kwargs)
if state_dict is None and not from_tf:
try:
state_dict = torch.load(resolved_archive_file, map_location="cpu")
except Exception:
raise OSError(
"Unable to load weights from pytorch checkpoint file. "
"If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True. "
)
missing_keys = []
unexpected_keys = []
error_msgs = []
if from_tf:
if resolved_archive_file.endswith(".index"):
# Load from a TensorFlow 1.X checkpoint - provided by original authors
model = cls.load_tf_weights(model, config, resolved_archive_file[:-6]) # Remove the '.index'
else:
# Load from our TensorFlow 2.0 checkpoints
try:
from transformers import load_tf2_checkpoint_in_pytorch_model
model = load_tf2_checkpoint_in_pytorch_model(model, resolved_archive_file, allow_missing_keys=True)
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
else:
# Convert old format to new format if needed from a PyTorch state_dict
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if "gamma" in key:
new_key = key.replace("gamma", "weight")
if "beta" in key:
new_key = key.replace("beta", "bias")
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, "_metadata", None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
# PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants
# so we need to apply the function recursively.
def load(module: nn.Module, prefix=""):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs,
)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + ".")
# Make sure we are able to load base models as well as derived models (with heads)
start_prefix = ""
model_to_load = model
has_prefix_module = any(s.startswith(cls.base_model_prefix) for s in state_dict.keys())
if not hasattr(model, cls.base_model_prefix) and has_prefix_module:
start_prefix = cls.base_model_prefix + "."
if hasattr(model, cls.base_model_prefix) and not has_prefix_module:
model_to_load = getattr(model, cls.base_model_prefix)
load(model_to_load, prefix=start_prefix)
if model.__class__.__name__ != model_to_load.__class__.__name__:
base_model_state_dict = model_to_load.state_dict().keys()
head_model_state_dict_without_base_prefix = [
key.split(cls.base_model_prefix + ".")[-1] for key in model.state_dict().keys()
]
missing_keys.extend(head_model_state_dict_without_base_prefix - base_model_state_dict)
if len(missing_keys) > 0:
logger.info(
"Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys
)
)
if len(unexpected_keys) > 0:
logger.info(
"Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys
)
)
if len(error_msgs) > 0:
raise RuntimeError(
"Error(s) in loading state_dict for {}:\n\t{}".format(
model.__class__.__name__, "\n\t".join(error_msgs)
)
)
model.tie_weights() # make sure token embedding weights are still tied if needed
# Set model in evaluation mode to deactivate DropOut modules by default
model.eval()
if output_loading_info:
loading_info = {
"missing_keys": missing_keys,
"unexpected_keys": unexpected_keys,
"error_msgs": error_msgs,
}
return model, loading_info
if hasattr(config, "xla_device") and config.xla_device:
import torch_xla.core.xla_model as xm
model = xm.send_cpu_data_to_device(model, xm.xla_device())
model = model.to(xm.xla_device())
return model
def prepare_inputs_for_generation(self, input_ids, **kwargs):
return {"input_ids": input_ids}
def prepare_scores_for_generation(self, scores, **kwargs):
return scores
def _use_cache(self, outputs, use_cache):
"""During generation, decide whether to pass the `past` variable to the next forward pass."""
if len(outputs) <= 1 or use_cache is False:
return False
if hasattr(self.config, "mem_len") and self.config.mem_len == 0:
return False
return True
def enforce_repetition_penalty_(self, lprobs, batch_size, num_beams, prev_output_tokens, repetition_penalty):
"""repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858). """
for i in range(batch_size * num_beams):
for previous_token in set(prev_output_tokens[i].tolist()):
# if score < 0 then repetition penalty has to be multiplied to reduce the previous token probability
if lprobs[i, previous_token] < 0:
lprobs[i, previous_token] *= repetition_penalty
else:
lprobs[i, previous_token] /= repetition_penalty
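# Illustrative sketch (added for clarity, not part of the original module; numbers are made up):
# with repetition_penalty = 1.2 and a previously generated token whose log-probability is -0.5,
# the negative score is multiplied: -0.5 * 1.2 = -0.6, making the token less likely;
# a positive score such as 0.5 would instead be divided: 0.5 / 1.2 ~= 0.42.
# Either way, tokens that already appeared are pushed down relative to unseen tokens.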
@torch.no_grad()
def generate(
self,
input_ids=None,
max_length=None,
min_length=None,
do_sample=None,
early_stopping=None,
num_beams=None,
temperature=None,
top_k=None,
top_p=None,
repetition_penalty=None,
bad_words_ids=None,
bos_token_id=None,
pad_token_id=None,
eos_token_id=None,
length_penalty=None,
no_repeat_ngram_size=None,
num_return_sequences=None,
attention_mask=None,
decoder_start_token_id=None,
use_cache=None,
):
r""" Generates sequences for models with a LM head. The method currently supports greedy decoding, beam-search decoding, sampling with temperature, sampling with top-k or nucleus sampling.
Adapted in part from `Facebook's XLM beam search code`_.
.. _`Facebook's XLM beam search code`:
https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529
Parameters:
input_ids: (`optional`) `torch.LongTensor` of shape `(batch_size, sequence_length)`
The sequence used as a prompt for the generation. If `None` the method initializes
it as an empty `torch.LongTensor` of shape `(1,)`.
max_length: (`optional`) int
The max length of the sequence to be generated. Between `min_length` and infinity. Default to 20.
min_length: (`optional`) int
The min length of the sequence to be generated. Between 0 and infinity. Default to 0.
do_sample: (`optional`) bool
If set to `False` greedy decoding is used. Otherwise sampling is used. Defaults to `False` as defined in `configuration_utils.PretrainedConfig`.
early_stopping: (`optional`) bool
if set to `True` beam search is stopped when at least `num_beams` sentences finished per batch. Defaults to `False` as defined in `configuration_utils.PretrainedConfig`.
num_beams: (`optional`) int
Number of beams for beam search. Must be between 1 and infinity. 1 means no beam search. Default to 1.
temperature: (`optional`) float
The value used to module the next token probabilities. Must be strictly positive. Default to 1.0.
top_k: (`optional`) int
The number of highest probability vocabulary tokens to keep for top-k-filtering. Between 1 and infinity. Default to 50.
top_p: (`optional`) float
The cumulative probability of parameter highest probability vocabulary tokens to keep for nucleus sampling. Must be between 0 and 1. Default to 1.
repetition_penalty: (`optional`) float
The parameter for repetition penalty. Between 1.0 and infinity. 1.0 means no penalty. Default to 1.0.
pad_token_id: (`optional`) int
Padding token. Defaults to the model-specific `pad_token_id` or `None` if it does not exist.
bos_token_id: (`optional`) int
BOS token. Defaults to `bos_token_id` as defined in the models config.
eos_token_id: (`optional`) int
EOS token. Defaults to `eos_token_id` as defined in the models config.
length_penalty: (`optional`) float
Exponential penalty to the length. Default to 1.
no_repeat_ngram_size: (`optional`) int
If set to int > 0, all ngrams of size `no_repeat_ngram_size` can only occur once.
bad_words_ids: (`optional`) list of lists of int
`bad_words_ids` contains tokens that are not allowed to be generated. In order to get the tokens of the words that should not appear in the generated text, use `tokenizer.encode(bad_word, add_prefix_space=True)`.
num_return_sequences: (`optional`) int
The number of independently computed returned sequences for each element in the batch. Default to 1.
attention_mask (`optional`) obj: `torch.LongTensor` of same shape as `input_ids`
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
Defaults to `None`.
`What are attention masks? <../glossary.html#attention-mask>`__
decoder_start_token_id=None: (`optional`) int
If an encoder-decoder model starts decoding with a different token than BOS.
Defaults to `None` and is changed to `BOS` later.
use_cache: (`optional`) bool
If `use_cache` is True, past key values are used to speed up decoding if applicable to model. Defaults to `True`.
Return:
output: `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`
sequence_length is either equal to max_length or shorter if all batches finished early due to the `eos_token_id`
Examples::
tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.
outputs = model.generate(max_length=40) # do greedy decoding
print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('openai-gpt') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('openai-gpt') # Download model and configuration from S3 and cache.
input_context = 'The dog'
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
outputs = model.generate(input_ids=input_ids, num_beams=5, num_return_sequences=3, temperature=1.5) # generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context 'The dog'
for i in range(3): # 3 output sequences were generated
print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.
input_context = 'The dog'
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
outputs = model.generate(input_ids=input_ids, max_length=40, temperature=0.7, num_return_sequences=3) # generate 3 independent sequences by sampling
for i in range(3): # 3 output sequences were generated
print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('ctrl') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('ctrl') # Download model and configuration from S3 and cache.
input_context = 'Legal My neighbor is' # "Legal" is one of the control codes for ctrl
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
outputs = model.generate(input_ids=input_ids, max_length=50, temperature=0.7, repetition_penalty=1.2) # generate sequences
print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('gpt2') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('gpt2') # Download model and configuration from S3 and cache.
input_context = 'My cute dog' # context to complete
bad_words_ids = [tokenizer.encode(bad_word, add_prefix_space=True) for bad_word in ['idiot', 'stupid', 'shut up']]
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
outputs = model.generate(input_ids=input_ids, max_length=100, do_sample=True, bad_words_ids=bad_words_ids) # generate sequences without allowing bad_words to be generated
"""
# We cannot generate if the model does not have a LM head
if self.get_output_embeddings() is None:
raise AttributeError(
"You tried to generate sequences with a model that does not have a LM Head."
"Please use another model class (e.g. `OpenAIGPTLMHeadModel`, `XLNetLMHeadModel`, `GPT2LMHeadModel`, `CTRLLMHeadModel`, `T5WithLMHeadModel`, `TransfoXLLMHeadModel`, `XLMWithLMHeadModel`, `BartForConditionalGeneration` )"
)
max_length = max_length if max_length is not None else self.config.max_length
min_length = min_length if min_length is not None else self.config.min_length
do_sample = do_sample if do_sample is not None else self.config.do_sample
early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping
use_cache = use_cache if use_cache is not None else self.config.use_cache
num_beams = num_beams if num_beams is not None else self.config.num_beams
temperature = temperature if temperature is not None else self.config.temperature
top_k = top_k if top_k is not None else self.config.top_k
top_p = top_p if top_p is not None else self.config.top_p
repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty
bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id
pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id
length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty
no_repeat_ngram_size = (
no_repeat_ngram_size if no_repeat_ngram_size is not None else self.config.no_repeat_ngram_size
)
bad_words_ids = bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids
num_return_sequences = (
num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences
)
decoder_start_token_id = (
decoder_start_token_id if decoder_start_token_id is not None else self.config.decoder_start_token_id
)
if input_ids is not None:
batch_size = input_ids.shape[0] # overridden by the input batch_size
else:
batch_size = 1
assert isinstance(max_length, int) and max_length > 0, "`max_length` should be a strictly positive integer."
assert isinstance(min_length, int) and min_length >= 0, "`min_length` should be a positive integer."
assert isinstance(do_sample, bool), "`do_sample` should be a boolean."
assert isinstance(early_stopping, bool), "`early_stopping` should be a boolean."
assert isinstance(use_cache, bool), "`use_cache` should be a boolean."
assert isinstance(num_beams, int) and num_beams > 0, "`num_beams` should be a strictly positive integer."
assert temperature > 0, "`temperature` should be strictly positive."
assert isinstance(top_k, int) and top_k >= 0, "`top_k` should be a positive integer."
assert 0 <= top_p <= 1, "`top_p` should be between 0 and 1."
assert repetition_penalty >= 1.0, "`repetition_penalty` should be >= 1."
assert input_ids is not None or (
isinstance(bos_token_id, int) and bos_token_id >= 0
), "If input_ids is not defined, `bos_token_id` should be a positive integer."
assert pad_token_id is None or (
isinstance(pad_token_id, int) and (pad_token_id >= 0)
), "`pad_token_id` should be a positive integer."
assert (eos_token_id is None) or (
isinstance(eos_token_id, int) and (eos_token_id >= 0)
), "`eos_token_id` should be a positive integer."
assert length_penalty > 0, "`length_penalty` should be strictly positive."
assert (
isinstance(no_repeat_ngram_size, int) and no_repeat_ngram_size >= 0
), "`no_repeat_ngram_size` should be a positive integer."
assert (
isinstance(num_return_sequences, int) and num_return_sequences > 0
), "`num_return_sequences` should be a strictly positive integer."
assert (
bad_words_ids is None or isinstance(bad_words_ids, list) and isinstance(bad_words_ids[0], list)
), "`bad_words_ids` is either `None` or a list of lists of tokens that should not be generated"
if input_ids is None:
assert isinstance(bos_token_id, int) and bos_token_id >= 0, (
"you should either supply a context to complete as `input_ids` input "
"or a `bos_token_id` (integer >= 0) as a first token to start the generation."
)
input_ids = torch.full(
(batch_size, 1), bos_token_id, dtype=torch.long, device=next(self.parameters()).device,
)
else:
assert input_ids.dim() == 2, "Input prompt should be of shape (batch_size, sequence length)."
# do not allow duplicate outputs when greedy decoding
if do_sample is False:
if num_beams == 1:
# no_beam_search greedy generation conditions
assert (
num_return_sequences == 1
), "Greedy decoding will always produce the same output for num_beams == 1 and num_return_sequences > 1. Please set num_return_sequences = 1"
else:
# beam_search greedy generation conditions
assert (
num_beams >= num_return_sequences
), "Greedy beam search decoding cannot return more sequences than it has beams. Please set num_beams >= num_return_sequences"
# create attention mask if necessary
# TODO (PVP): this should later be handled by the forward fn() in each model in the future see PR 3140
if (attention_mask is None) and (pad_token_id is not None) and (pad_token_id in input_ids):
attention_mask = input_ids.ne(pad_token_id).long()
elif attention_mask is None:
attention_mask = input_ids.new_ones(input_ids.shape)
# set pad_token_id to eos_token_id if not set. Important that this is done after
# attention_mask is created
if pad_token_id is None and eos_token_id is not None:
logger.warning(
"Setting `pad_token_id` to {} (first `eos_token_id`) to generate sequence".format(eos_token_id)
)
pad_token_id = eos_token_id
# current position and vocab size
if hasattr(self.config, "vocab_size"):
vocab_size = self.config.vocab_size
elif (
self.config.is_encoder_decoder
and hasattr(self.config, "decoder")
and hasattr(self.config.decoder, "vocab_size")
):
vocab_size = self.config.decoder.vocab_size
# set effective batch size and effective batch multiplier according to do_sample
if do_sample:
effective_batch_size = batch_size * num_return_sequences
effective_batch_mult = num_return_sequences
else:
effective_batch_size = batch_size
effective_batch_mult = 1
if self.config.is_encoder_decoder:
if decoder_start_token_id is None:
decoder_start_token_id = bos_token_id
assert (
decoder_start_token_id is not None
), "decoder_start_token_id or bos_token_id has to be defined for encoder-decoder generation"
assert hasattr(self, "get_encoder"), "{} should have a 'get_encoder' function defined".format(self)
assert callable(self.get_encoder), "{} should be a method".format(self.get_encoder)
# get encoder and store encoder outputs
encoder = self.get_encoder()
encoder_outputs: tuple = encoder(input_ids, attention_mask=attention_mask)
# Expand input ids if num_beams > 1 or num_return_sequences > 1
if num_return_sequences > 1 or num_beams > 1:
input_ids_len = input_ids.shape[-1]
input_ids = input_ids.unsqueeze(1).expand(batch_size, effective_batch_mult * num_beams, input_ids_len)
attention_mask = attention_mask.unsqueeze(1).expand(
batch_size, effective_batch_mult * num_beams, input_ids_len
)
input_ids = input_ids.contiguous().view(
effective_batch_size * num_beams, input_ids_len
) # shape: (batch_size * num_return_sequences * num_beams, cur_len)
attention_mask = attention_mask.contiguous().view(
effective_batch_size * num_beams, input_ids_len
) # shape: (batch_size * num_return_sequences * num_beams, cur_len)
if self.config.is_encoder_decoder:
# create empty decoder_input_ids
input_ids = torch.full(
(effective_batch_size * num_beams, 1),
decoder_start_token_id,
dtype=torch.long,
device=next(self.parameters()).device,
)
cur_len = 1
assert (
batch_size == encoder_outputs[0].shape[0]
), f"expected encoder_outputs[0] to have 1st dimension bs={batch_size}, got {encoder_outputs[0].shape[0]} "
# expand batch_idx to assign correct encoder output for expanded input_ids (due to num_beams > 1 and num_return_sequences > 1)
expanded_batch_idxs = (
torch.arange(batch_size)
.view(-1, 1)
.repeat(1, num_beams * effective_batch_mult)
.view(-1)
.to(input_ids.device)
)
# expand encoder_outputs
encoder_outputs = (encoder_outputs[0].index_select(0, expanded_batch_idxs), *encoder_outputs[1:])
else:
encoder_outputs = None
cur_len = input_ids.shape[-1]
if num_beams > 1:
output = self._generate_beam_search(
input_ids,
cur_len=cur_len,
max_length=max_length,
min_length=min_length,
do_sample=do_sample,
early_stopping=early_stopping,
temperature=temperature,
top_k=top_k,
top_p=top_p,
repetition_penalty=repetition_penalty,
no_repeat_ngram_size=no_repeat_ngram_size,
bad_words_ids=bad_words_ids,
bos_token_id=bos_token_id,
pad_token_id=pad_token_id,
decoder_start_token_id=decoder_start_token_id,
eos_token_id=eos_token_id,
batch_size=effective_batch_size,
num_return_sequences=num_return_sequences,
length_penalty=length_penalty,
num_beams=num_beams,
vocab_size=vocab_size,
encoder_outputs=encoder_outputs,
attention_mask=attention_mask,
use_cache=use_cache,
)
else:
output = self._generate_no_beam_search(
input_ids,
cur_len=cur_len,
max_length=max_length,
min_length=min_length,
do_sample=do_sample,
temperature=temperature,
top_k=top_k,
top_p=top_p,
repetition_penalty=repetition_penalty,
no_repeat_ngram_size=no_repeat_ngram_size,
bad_words_ids=bad_words_ids,
bos_token_id=bos_token_id,
pad_token_id=pad_token_id,
decoder_start_token_id=decoder_start_token_id,
eos_token_id=eos_token_id,
batch_size=effective_batch_size,
encoder_outputs=encoder_outputs,
attention_mask=attention_mask,
use_cache=use_cache,
)
return output
def _generate_no_beam_search(
self,
input_ids,
cur_len,
max_length,
min_length,
do_sample,
temperature,
top_k,
top_p,
repetition_penalty,
no_repeat_ngram_size,
bad_words_ids,
bos_token_id,
pad_token_id,
eos_token_id,
decoder_start_token_id,
batch_size,
encoder_outputs,
attention_mask,
use_cache,
):
""" Generate sequences for each example without beam search (num_beams == 1).
All returned sequences are generated independently.
"""
# length of generated sentences / unfinished sentences
unfinished_sents = input_ids.new(batch_size).fill_(1)
sent_lengths = input_ids.new(batch_size).fill_(max_length)
past = encoder_outputs # defined for encoder-decoder models, None for decoder-only models
while cur_len < max_length:
model_inputs = self.prepare_inputs_for_generation(
input_ids, past=past, attention_mask=attention_mask, use_cache=use_cache
)
outputs = self(**model_inputs)
next_token_logits = outputs[0][:, -1, :]
# if model has past, then set the past variable to speed up decoding
if self._use_cache(outputs, use_cache):
past = outputs[1]
# repetition penalty from CTRL paper (https://arxiv.org/abs/1909.05858)
if repetition_penalty != 1.0:
self.enforce_repetition_penalty_(next_token_logits, batch_size, 1, input_ids, repetition_penalty)
if no_repeat_ngram_size > 0:
# calculate a list of banned tokens to prevent repetitively generating the same ngrams
# from fairseq: https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345
banned_tokens = calc_banned_ngram_tokens(input_ids, batch_size, no_repeat_ngram_size, cur_len)
for batch_idx in range(batch_size):
next_token_logits[batch_idx, banned_tokens[batch_idx]] = -float("inf")
if bad_words_ids is not None:
# calculate a list of banned tokens according to bad words
banned_tokens = calc_banned_bad_words_ids(input_ids, bad_words_ids)
for batch_idx in range(batch_size):
next_token_logits[batch_idx, banned_tokens[batch_idx]] = -float("inf")
# set eos token prob to zero if min_length is not reached
if eos_token_id is not None and cur_len < min_length:
next_token_logits[:, eos_token_id] = -float("inf")
if do_sample:
# Temperature (higher temperature => more likely to sample low probability tokens)
if temperature != 1.0:
next_token_logits = next_token_logits / temperature
# Top-p/top-k filtering
next_token_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)
# Sample
probs = F.softmax(next_token_logits, dim=-1)
next_token = torch.multinomial(probs, num_samples=1).squeeze(1)
else:
# Greedy decoding
next_token = torch.argmax(next_token_logits, dim=-1)
# update generations and finished sentences
if eos_token_id is not None:
# pad finished sentences if eos_token_id exist
tokens_to_add = next_token * unfinished_sents + (pad_token_id) * (1 - unfinished_sents)
else:
tokens_to_add = next_token
input_ids = torch.cat([input_ids, tokens_to_add.unsqueeze(-1)], dim=-1)
if eos_token_id is not None:
eos_in_sents = tokens_to_add == eos_token_id
# if sentence is unfinished and the token to add is eos, sent_lengths is filled with current length
is_sents_unfinished_and_token_to_add_is_eos = unfinished_sents.mul(eos_in_sents.long()).bool()
sent_lengths.masked_fill_(is_sents_unfinished_and_token_to_add_is_eos, cur_len + 1)
# unfinished_sents is set to zero if eos in sentence
unfinished_sents.mul_((~eos_in_sents).long())
# stop when there is a </s> in each sentence, or if we exceed the maximum length
if unfinished_sents.max() == 0:
break
# extend attention_mask for new generated input if only decoder
if self.config.is_encoder_decoder is False:
attention_mask = torch.cat(
[attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
)
cur_len = cur_len + 1
# if there are different sentences lengths in the batch, some batches have to be padded
if sent_lengths.min().item() != sent_lengths.max().item():
assert pad_token_id is not None, "`pad_token_id` has to be defined if batches have different lengths"
# finished sents are filled with pad_token
decoded = input_ids.new(batch_size, sent_lengths.max().item()).fill_(pad_token_id)
else:
decoded = input_ids
for hypo_idx, hypo in enumerate(input_ids):
decoded[hypo_idx, : sent_lengths[hypo_idx]] = hypo[: sent_lengths[hypo_idx]]
return decoded
def _generate_beam_search(
self,
input_ids,
cur_len,
max_length,
min_length,
do_sample,
early_stopping,
temperature,
top_k,
top_p,
repetition_penalty,
no_repeat_ngram_size,
bad_words_ids,
bos_token_id,
pad_token_id,
eos_token_id,
decoder_start_token_id,
batch_size,
num_return_sequences,
length_penalty,
num_beams,
vocab_size,
encoder_outputs,
attention_mask,
use_cache,
):
""" Generate sequences for each example with beam search.
"""
# generated hypotheses
generated_hyps = [
BeamHypotheses(num_beams, max_length, length_penalty, early_stopping=early_stopping)
for _ in range(batch_size)
]
# scores for each sentence in the beam
beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)
# for greedy decoding it is made sure that only tokens of the first beam are considered to avoid sampling the exact same tokens num_beams times
if do_sample is False:
beam_scores[:, 1:] = -1e9
beam_scores = beam_scores.view(-1) # shape (batch_size * num_beams,)
# cache compute states
past = encoder_outputs # defined for encoder-decoder models, None for decoder-only models
# done sentences
done = [False for _ in range(batch_size)]
while cur_len < max_length:
model_inputs = self.prepare_inputs_for_generation(
input_ids, past=past, attention_mask=attention_mask, use_cache=use_cache
)
outputs = self(**model_inputs) # (batch_size * num_beams, cur_len, vocab_size)
next_token_logits = outputs[0][:, -1, :] # (batch_size * num_beams, vocab_size)
# if model has past, then set the past variable to speed up decoding
if self._use_cache(outputs, use_cache):
past = outputs[1]
# repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858)
if repetition_penalty != 1.0:
self.enforce_repetition_penalty_(
next_token_logits, batch_size, num_beams, input_ids, repetition_penalty,
)
if temperature != 1.0:
next_token_logits = next_token_logits / temperature
scores = F.log_softmax(next_token_logits, dim=-1) # (batch_size * num_beams, vocab_size)
if self.config.is_encoder_decoder and do_sample is False:
# TODO (PVP) still a bit hacky here - there might be a better solution
scores = self.prepare_scores_for_generation(scores, cur_len=cur_len, max_length=max_length)
# set eos token prob to zero if min_length is not reached
if eos_token_id is not None and cur_len < min_length:
scores[:, eos_token_id] = -float("inf")
if no_repeat_ngram_size > 0:
# calculate a list of banned tokens to prevent repetitively generating the same ngrams
num_batch_hypotheses = batch_size * num_beams
# from fairseq: https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345
banned_batch_tokens = calc_banned_ngram_tokens(
input_ids, num_batch_hypotheses, no_repeat_ngram_size, cur_len
)
for i, banned_tokens in enumerate(banned_batch_tokens):
scores[i, banned_tokens] = -float("inf")
if bad_words_ids is not None:
# calculate a list of banned tokens according to bad words
banned_tokens = calc_banned_bad_words_ids(input_ids, bad_words_ids)
for i, banned_tokens in enumerate(banned_tokens):
scores[i, banned_tokens] = -float("inf")
assert scores.shape == (batch_size * num_beams, vocab_size), "Shapes of scores: {} != {}".format(
scores.shape, (batch_size * num_beams, vocab_size)
)
if do_sample:
_scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size)
# Top-p/top-k filtering
_scores = top_k_top_p_filtering(
_scores, top_k=top_k, top_p=top_p, min_tokens_to_keep=2
) # (batch_size * num_beams, vocab_size)
# re-organize to group the beam together to sample from all beam_idxs
_scores = _scores.contiguous().view(
batch_size, num_beams * vocab_size
) # (batch_size, num_beams * vocab_size)
# Sample 2 next tokens for each beam (so we have some spare tokens and match output of greedy beam search)
probs = F.softmax(_scores, dim=-1)
next_tokens = torch.multinomial(probs, num_samples=2 * num_beams) # (batch_size, num_beams * 2)
# Compute next scores
next_scores = torch.gather(_scores, -1, next_tokens) # (batch_size, num_beams * 2)
# sort the sampled vector to make sure that the first num_beams samples are the best
next_scores, next_scores_indices = torch.sort(next_scores, descending=True, dim=1)
next_tokens = torch.gather(next_tokens, -1, next_scores_indices) # (batch_size, num_beams * 2)
else:
next_scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size)
# re-organize to group the beams together (we are keeping the top hypotheses across beams)
next_scores = next_scores.view(
batch_size, num_beams * vocab_size
) # (batch_size, num_beams * vocab_size)
next_scores, next_tokens = torch.topk(next_scores, 2 * num_beams, dim=1, largest=True, sorted=True)
assert next_scores.size() == next_tokens.size() == (batch_size, 2 * num_beams)
# next batch beam content
next_batch_beam = []
# for each sentence
for batch_idx in range(batch_size):
# if we are done with this sentence
if done[batch_idx]:
assert (
len(generated_hyps[batch_idx]) >= num_beams
), "Batch can only be done if at least {} beams have been generated".format(num_beams)
assert (
eos_token_id is not None and pad_token_id is not None
), "generated beams >= num_beams -> eos_token_id and pad_token have to be defined"
next_batch_beam.extend([(0, pad_token_id, 0)] * num_beams) # pad the batch
continue
# next sentence beam content
next_sent_beam = []
# next tokens for this sentence
for beam_token_rank, (beam_token_id, beam_token_score) in enumerate(
zip(next_tokens[batch_idx], next_scores[batch_idx])
):
# get beam and token IDs
beam_id = beam_token_id // vocab_size
token_id = beam_token_id % vocab_size
effective_beam_id = batch_idx * num_beams + beam_id
# add to generated hypotheses if end of sentence or last iteration
if (eos_token_id is not None) and (token_id.item() == eos_token_id):
# if beam_token does not belong to top num_beams tokens, it should not be added
is_beam_token_worse_than_top_num_beams = beam_token_rank >= num_beams
if is_beam_token_worse_than_top_num_beams:
continue
generated_hyps[batch_idx].add(
input_ids[effective_beam_id].clone(), beam_token_score.item(),
)
else:
# add next predicted token if it is not eos_token
next_sent_beam.append((beam_token_score, token_id, effective_beam_id))
# the beam for next step is full
if len(next_sent_beam) == num_beams:
break
# Check if we're done so that we can save a pad step if all(done)
done[batch_idx] = done[batch_idx] or generated_hyps[batch_idx].is_done(
next_scores[batch_idx].max().item(), cur_len=cur_len
)
# update next beam content
assert len(next_sent_beam) == num_beams, "Beam should always be full"
next_batch_beam.extend(next_sent_beam)
assert len(next_batch_beam) == num_beams * (batch_idx + 1)
# stop when we are done with each sentence
if all(done):
break
# sanity check / prepare next batch
assert len(next_batch_beam) == batch_size * num_beams
beam_scores = beam_scores.new([x[0] for x in next_batch_beam])
beam_tokens = input_ids.new([x[1] for x in next_batch_beam])
beam_idx = input_ids.new([x[2] for x in next_batch_beam])
# re-order batch
input_ids = input_ids[beam_idx, :]
input_ids = torch.cat([input_ids, beam_tokens.unsqueeze(1)], dim=-1)
# re-order internal states
if past is not None:
past = self._reorder_cache(past, beam_idx)
# extend attention_mask for new generated input if only decoder
if self.config.is_encoder_decoder is False:
attention_mask = torch.cat(
[attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
)
# update current length
cur_len = cur_len + 1
# finalize all open beam hypotheses and end to generated hypotheses
for batch_idx in range(batch_size):
if done[batch_idx]:
continue
# test that beam scores match previously calculated scores if not eos and batch_idx not done
if eos_token_id is not None and all(
(token_id % vocab_size).item() != eos_token_id for token_id in next_tokens[batch_idx]
):
assert torch.all(
next_scores[batch_idx, :num_beams] == beam_scores.view(batch_size, num_beams)[batch_idx]
), "If batch_idx is not done, final next scores: {} have to equal to accumulated beam_scores: {}".format(
next_scores[:, :num_beams][batch_idx], beam_scores.view(batch_size, num_beams)[batch_idx],
)
# need to add best num_beams hypotheses to generated hyps
for beam_id in range(num_beams):
effective_beam_id = batch_idx * num_beams + beam_id
final_score = beam_scores[effective_beam_id].item()
final_tokens = input_ids[effective_beam_id]
generated_hyps[batch_idx].add(final_tokens, final_score)
# depending on whether greedy generation is wanted or not define different output_batch_size and output_num_return_sequences_per_batch
output_batch_size = batch_size if do_sample else batch_size * num_return_sequences
output_num_return_sequences_per_batch = 1 if do_sample else num_return_sequences
# select the best hypotheses
sent_lengths = input_ids.new(output_batch_size)
best = []
# retrieve best hypotheses
for i, hypotheses in enumerate(generated_hyps):
sorted_hyps = sorted(hypotheses.beams, key=lambda x: x[0])
for j in range(output_num_return_sequences_per_batch):
effective_batch_idx = output_num_return_sequences_per_batch * i + j
best_hyp = sorted_hyps.pop()[1]
sent_lengths[effective_batch_idx] = len(best_hyp)
best.append(best_hyp)
# shorter batches are filled with pad_token
if sent_lengths.min().item() != sent_lengths.max().item():
assert pad_token_id is not None, "`pad_token_id` has to be defined"
sent_max_len = min(sent_lengths.max().item() + 1, max_length)
decoded = input_ids.new(output_batch_size, sent_max_len).fill_(pad_token_id)
# fill with hypothesis and eos_token_id if necessary
for i, hypo in enumerate(best):
decoded[i, : sent_lengths[i]] = hypo
if sent_lengths[i] < max_length:
decoded[i, sent_lengths[i]] = eos_token_id
else:
# none of the hypotheses have an eos_token
assert all(len(hypo) == max_length for hypo in best)
decoded = torch.stack(best).type(torch.long).to(next(self.parameters()).device)
return decoded
# force one of token_ids to be generated by setting prob of all other tokens to 0.
def _force_token_ids_generation(self, scores, token_ids) -> None:
if isinstance(token_ids, int):
token_ids = [token_ids]
all_but_token_ids_mask = torch.tensor(
[x for x in range(self.config.vocab_size) if x not in token_ids],
dtype=torch.long,
device=next(self.parameters()).device,
)
assert len(scores.shape) == 2, "scores should be of rank 2 with shape: [batch_size, vocab_size]"
scores[:, all_but_token_ids_mask] = -float("inf")
@staticmethod
def _reorder_cache(past: Tuple, beam_idx: Tensor) -> Tuple[Tensor]:
return tuple(layer_past.index_select(1, beam_idx) for layer_past in past)
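# Illustrative note (an assumption about the cache layout, not stated in this file): for GPT-2-style
# models `past` is a tuple with one tensor per layer whose beam dimension is dim 1 (e.g. shape
# (2, batch_size * num_beams, heads, seq_len, head_dim)); `index_select(1, beam_idx)` therefore
# re-orders the cached keys/values so each surviving beam keeps the key/value prefix that produced it.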
def calc_banned_ngram_tokens(prev_input_ids: Tensor, num_hypos: int, no_repeat_ngram_size: int, cur_len: int):
"""Copied from fairseq for no_repeat_ngram in beam_search"""
if cur_len + 1 < no_repeat_ngram_size:
# return no banned tokens if we haven't generated no_repeat_ngram_size tokens yet
return [[] for _ in range(num_hypos)]
generated_ngrams = [{} for _ in range(num_hypos)]
for idx in range(num_hypos):
gen_tokens = prev_input_ids[idx].tolist()
generated_ngram = generated_ngrams[idx]
for ngram in zip(*[gen_tokens[i:] for i in range(no_repeat_ngram_size)]):
prev_ngram_tuple = tuple(ngram[:-1])
generated_ngram[prev_ngram_tuple] = generated_ngram.get(prev_ngram_tuple, []) + [ngram[-1]]
def _get_generated_ngrams(hypo_idx):
# Before decoding the next token, prevent decoding of ngrams that have already appeared
start_idx = cur_len + 1 - no_repeat_ngram_size
ngram_idx = tuple(prev_input_ids[hypo_idx, start_idx:cur_len].tolist())
return generated_ngrams[hypo_idx].get(ngram_idx, [])
banned_tokens = [_get_generated_ngrams(hypo_idx) for hypo_idx in range(num_hypos)]
return banned_tokens
def calc_banned_bad_words_ids(prev_input_ids, bad_words_ids):
banned_tokens = []
def _tokens_match(prev_tokens, tokens):
if len(tokens) == 0:
# if bad word tokens is just one token always ban it
return True
if len(tokens) > len(prev_input_ids):
# if bad word tokens are longer than prev input_ids they can't be equal
return False
if prev_tokens[-len(tokens) :] == tokens:
# if tokens match
return True
else:
return False
for prev_input_ids_slice in prev_input_ids:
banned_tokens_slice = []
for banned_token_seq in bad_words_ids:
assert len(banned_token_seq) > 0, "Banned words token sequences {} cannot contain an empty list".format(
bad_words_ids
)
if _tokens_match(prev_input_ids_slice.tolist(), banned_token_seq[:-1]) is False:
# if tokens do not match continue
continue
banned_tokens_slice.append(banned_token_seq[-1])
banned_tokens.append(banned_tokens_slice)
return banned_tokens
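# Illustrative sketch (hypothetical ids, not part of the original module): with
# bad_words_ids = [[42], [7, 8]] and a hypothesis ending in token 7, token 42 is banned
# unconditionally (single-token bad word, the empty prefix always matches) and token 8 is banned
# because the prefix [7] of the two-token bad word matches the end of the hypothesis.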
def top_k_top_p_filtering(logits, top_k=0, top_p=1.0, filter_value=-float("Inf"), min_tokens_to_keep=1):
""" Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
Args:
logits: logits distribution shape (batch size, vocabulary size)
if top_k > 0: keep only top k tokens with highest probability (top-k filtering).
if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
Make sure we keep at least min_tokens_to_keep per batch example in the output
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
"""
if top_k > 0:
top_k = min(max(top_k, min_tokens_to_keep), logits.size(-1)) # Safety check
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p < 1.0:
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold (tokens with 0 are kept)
sorted_indices_to_remove = cumulative_probs > top_p
if min_tokens_to_keep > 1:
# Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below)
sorted_indices_to_remove[..., :min_tokens_to_keep] = 0
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
# scatter sorted tensors to original indexing
indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
logits[indices_to_remove] = filter_value
return logits
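# Illustrative sketch (toy logits, not part of the original module):
#   logits = torch.tensor([[4.0, 3.0, 1.0, 0.0]])
#   top_k_top_p_filtering(logits.clone(), top_k=2)   # -> [[4., 3., -inf, -inf]]
#   top_k_top_p_filtering(logits.clone(), top_p=0.9) # keeps the smallest set of highest-probability
#                                                    # tokens whose cumulative softmax mass reaches 0.9
#                                                    # (here the first two) and masks the rest with -inf.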
class BeamHypotheses(object):
def __init__(self, num_beams, max_length, length_penalty, early_stopping):
"""
Initialize n-best list of hypotheses.
"""
self.max_length = max_length - 1 # ignoring bos_token
self.length_penalty = length_penalty
self.early_stopping = early_stopping
self.num_beams = num_beams
self.beams = []
self.worst_score = 1e9
def __len__(self):
"""
Number of hypotheses in the list.
"""
return len(self.beams)
def add(self, hyp, sum_logprobs):
"""
Add a new hypothesis to the list.
"""
score = sum_logprobs / len(hyp) ** self.length_penalty
if len(self) < self.num_beams or score > self.worst_score:
self.beams.append((score, hyp))
if len(self) > self.num_beams:
sorted_scores = sorted([(s, idx) for idx, (s, _) in enumerate(self.beams)])
del self.beams[sorted_scores[0][1]]
self.worst_score = sorted_scores[1][0]
else:
self.worst_score = min(score, self.worst_score)
def is_done(self, best_sum_logprobs, cur_len=None):
"""
If there are enough hypotheses and none of the hypotheses being generated
can become better than the worst one in the heap, then we are done with this sentence.
"""
if len(self) < self.num_beams:
return False
elif self.early_stopping:
return True
else:
if cur_len is None:
cur_len = self.max_length
cur_score = best_sum_logprobs / cur_len ** self.length_penalty
ret = self.worst_score >= cur_score
return ret
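# Illustrative sketch (hypothetical numbers, not part of the original module): hypotheses are ranked
# by the length-normalized log-probability score = sum_logprobs / len(hyp) ** length_penalty.
# With length_penalty = 1.0, a 10-token hypothesis with sum_logprobs = -5.0 scores -0.5 and beats a
# 4-token hypothesis with sum_logprobs = -2.4 (score -0.6); length_penalty > 1.0 boosts longer
# hypotheses further, values < 1.0 favour shorter ones.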
class Conv1D(nn.Module):
def __init__(self, nf, nx):
""" Conv1D layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2)
Basically works like a Linear layer but the weights are transposed
"""
super().__init__()
self.nf = nf
w = torch.empty(nx, nf)
nn.init.normal_(w, std=0.02)
self.weight = nn.Parameter(w)
self.bias = nn.Parameter(torch.zeros(nf))
def forward(self, x):
size_out = x.size()[:-1] + (self.nf,)
x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
x = x.view(*size_out)
return x
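# Illustrative note (behavioural check only, not part of the original module): Conv1D(nf, nx)
# maps (..., nx) -> (..., nf) and behaves like nn.Linear(nx, nf) with the weight stored transposed, e.g.
#   conv = Conv1D(nf=8, nx=4); x = torch.randn(2, 3, 4)
#   y = conv(x)                          # shape (2, 3, 8)
#   y_ref = x @ conv.weight + conv.bias  # same values; conv.weight has shape (nx, nf) = (4, 8)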
class PoolerStartLogits(nn.Module):
""" Compute SQuAD start_logits from sequence hidden states. """
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, 1)
def forward(self, hidden_states, p_mask=None):
""" Args:
**p_mask**: (`optional`) ``torch.FloatTensor`` of shape `(batch_size, seq_len)`
invalid position mask such as query and special symbols (PAD, SEP, CLS)
1.0 means token should be masked.
"""
x = self.dense(hidden_states).squeeze(-1)
if p_mask is not None:
if next(self.parameters()).dtype == torch.float16:
x = x * (1 - p_mask) - 65500 * p_mask
else:
x = x * (1 - p_mask) - 1e30 * p_mask
return x
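# Note on the masking constants (added clarification): multiplying by (1 - p_mask) zeroes the logits
# at masked positions and subtracting a large constant pushes them to effectively -inf so they cannot
# win the softmax; 65500 is used under float16 because subtracting 1e30 would overflow half precision.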
class PoolerEndLogits(nn.Module):
""" Compute SQuAD end_logits from sequence hidden states and start token hidden state.
"""
def __init__(self, config):
super().__init__()
self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
self.activation = nn.Tanh()
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dense_1 = nn.Linear(config.hidden_size, 1)
def forward(self, hidden_states, start_states=None, start_positions=None, p_mask=None):
""" Args:
One of ``start_states``, ``start_positions`` should be not None.
If both are set, ``start_positions`` overrides ``start_states``.
**start_states**: ``torch.LongTensor`` of shape identical to hidden_states
hidden states of the first tokens for the labeled span.
**start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``
position of the first token for the labeled span:
**p_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, seq_len)``
Mask of invalid position such as query and special symbols (PAD, SEP, CLS)
1.0 means token should be masked.
"""
assert (
start_states is not None or start_positions is not None
), "One of start_states, start_positions should be not None"
if start_positions is not None:
slen, hsz = hidden_states.shape[-2:]
start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz)
start_states = hidden_states.gather(-2, start_positions) # shape (bsz, 1, hsz)
start_states = start_states.expand(-1, slen, -1) # shape (bsz, slen, hsz)
x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1))
x = self.activation(x)
x = self.LayerNorm(x)
x = self.dense_1(x).squeeze(-1)
if p_mask is not None:
if next(self.parameters()).dtype == torch.float16:
x = x * (1 - p_mask) - 65500 * p_mask
else:
x = x * (1 - p_mask) - 1e30 * p_mask
return x
class PoolerAnswerClass(nn.Module):
""" Compute SQuAD 2.0 answer class from classification and start tokens hidden states. """
def __init__(self, config):
super().__init__()
self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
self.activation = nn.Tanh()
self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False)
def forward(self, hidden_states, start_states=None, start_positions=None, cls_index=None):
"""
Args:
One of ``start_states``, ``start_positions`` should be not None.
If both are set, ``start_positions`` overrides ``start_states``.
**start_states**: ``torch.LongTensor`` of shape identical to ``hidden_states``.
hidden states of the first tokens for the labeled span.
**start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``
position of the first token for the labeled span.
**cls_index**: torch.LongTensor of shape ``(batch_size,)``
position of the CLS token. If None, take the last token.
note(Original repo):
no dependency on end_feature so that we can obtain one single `cls_logits`
for each sample
"""
hsz = hidden_states.shape[-1]
assert (
start_states is not None or start_positions is not None
), "One of start_states, start_positions should be not None"
if start_positions is not None:
start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz)
start_states = hidden_states.gather(-2, start_positions).squeeze(-2) # shape (bsz, hsz)
if cls_index is not None:
cls_index = cls_index[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz)
cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, hsz)
else:
cls_token_state = hidden_states[:, -1, :] # shape (bsz, hsz)
x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1))
x = self.activation(x)
x = self.dense_1(x).squeeze(-1)
return x
class SQuADHead(nn.Module):
r""" A SQuAD head inspired by XLNet.
Parameters:
config (:class:`~transformers.XLNetConfig`): Model configuration class with all the parameters of the model.
Inputs:
**hidden_states**: ``torch.FloatTensor`` of shape ``(batch_size, seq_len, hidden_size)``
hidden states of sequence tokens
**start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``
position of the first token for the labeled span.
**end_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``
position of the last token for the labeled span.
**cls_index**: torch.LongTensor of shape ``(batch_size,)``
position of the CLS token. If None, take the last token.
**is_impossible**: ``torch.LongTensor`` of shape ``(batch_size,)``
Whether the question has a possible answer in the paragraph or not.
**p_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, seq_len)``
Mask of invalid position such as query and special symbols (PAD, SEP, CLS)
1.0 means token should be masked.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned if both ``start_positions`` and ``end_positions`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.
**start_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)``
Log probabilities for the top config.start_n_top start token possibilities (beam-search).
**start_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)``
Indices for the top config.start_n_top start token possibilities (beam-search).
**end_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``
Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
**end_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``
Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
**cls_logits**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
``torch.FloatTensor`` of shape ``(batch_size,)``
Log probabilities for the ``is_impossible`` label of the answers.
"""
def __init__(self, config):
super().__init__()
self.start_n_top = config.start_n_top
self.end_n_top = config.end_n_top
self.start_logits = PoolerStartLogits(config)
self.end_logits = PoolerEndLogits(config)
self.answer_class = PoolerAnswerClass(config)
def forward(
self, hidden_states, start_positions=None, end_positions=None, cls_index=None, is_impossible=None, p_mask=None,
):
outputs = ()
start_logits = self.start_logits(hidden_states, p_mask=p_mask)
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, let's remove the dimension added by batch splitting
for x in (start_positions, end_positions, cls_index, is_impossible):
if x is not None and x.dim() > 1:
x.squeeze_(-1)
# during training, compute the end logits based on the ground truth of the start position
end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)
loss_fct = CrossEntropyLoss()
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if cls_index is not None and is_impossible is not None:
# Predict answerability from the representation of CLS and START
cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)
loss_fct_cls = nn.BCEWithLogitsLoss()
cls_loss = loss_fct_cls(cls_logits, is_impossible)
# note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
total_loss += cls_loss * 0.5
outputs = (total_loss,) + outputs
else:
# during inference, compute the end logits based on beam search
bsz, slen, hsz = hidden_states.size()
start_log_probs = F.softmax(start_logits, dim=-1) # shape (bsz, slen)
start_top_log_probs, start_top_index = torch.topk(
start_log_probs, self.start_n_top, dim=-1
) # shape (bsz, start_n_top)
start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz)
start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz)
start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz)
hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(
start_states
) # shape (bsz, slen, start_n_top, hsz)
p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
end_log_probs = F.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top)
end_top_log_probs, end_top_index = torch.topk(
end_log_probs, self.end_n_top, dim=1
) # shape (bsz, end_n_top, start_n_top)
end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)
start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs)
cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index)
outputs = (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits,) + outputs
# return start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits
# or (if labels are provided) (total_loss,)
return outputs
class SequenceSummary(nn.Module):
r""" Compute a single vector summary of a sequence hidden states according to various possibilities:
Args of the config class:
summary_type:
- 'last' => [default] take the last token hidden state (like XLNet)
- 'first' => take the first token hidden state (like Bert)
- 'mean' => take the mean of all tokens hidden states
- 'cls_index' => supply a Tensor of classification token position (GPT/GPT-2)
- 'attn' => Not implemented now, use multi-head attention
summary_use_proj: Add a projection after the vector extraction
summary_proj_to_labels: If True, the projection outputs to config.num_labels classes (otherwise to hidden_size). Default: False.
summary_activation: 'tanh' or another supported activation string => add that activation to the output; if unset => no activation (default).
summary_first_dropout: Add a dropout before the projection and activation
summary_last_dropout: Add a dropout after the projection and activation
"""
def __init__(self, config: PretrainedConfig):
super().__init__()
self.summary_type = getattr(config, "summary_type", "last")
if self.summary_type == "attn":
# We should use a standard multi-head attention module with absolute positional embedding for that.
# Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276
# We can probably just use the multi-head attention module of PyTorch >=1.1.0
raise NotImplementedError
self.summary = Identity()
if hasattr(config, "summary_use_proj") and config.summary_use_proj:
if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0:
num_classes = config.num_labels
else:
num_classes = config.hidden_size
self.summary = nn.Linear(config.hidden_size, num_classes)
activation_string = getattr(config, "summary_activation", None)
self.activation: Callable = (get_activation(activation_string) if activation_string else Identity())
self.first_dropout = Identity()
if hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0:
self.first_dropout = nn.Dropout(config.summary_first_dropout)
self.last_dropout = Identity()
if hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0:
self.last_dropout = nn.Dropout(config.summary_last_dropout)
def forward(self, hidden_states, cls_index=None):
""" hidden_states: float Tensor in shape [bsz, ..., seq_len, hidden_size], the hidden-states of the last layer.
cls_index: [optional] position of the classification token if summary_type == 'cls_index',
shape (bsz,) or more generally (bsz, ...) where ... are optional leading dimensions of hidden_states.
if summary_type == 'cls_index' and cls_index is None:
we take the last token of the sequence as classification token
"""
if self.summary_type == "last":
output = hidden_states[:, -1]
elif self.summary_type == "first":
output = hidden_states[:, 0]
elif self.summary_type == "mean":
output = hidden_states.mean(dim=1)
elif self.summary_type == "cls_index":
if cls_index is None:
cls_index = torch.full_like(hidden_states[..., :1, :], hidden_states.shape[-2] - 1, dtype=torch.long,)
else:
cls_index = cls_index.unsqueeze(-1).unsqueeze(-1)
cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),))
# shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states
output = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, XX, hidden_size)
elif self.summary_type == "attn":
raise NotImplementedError
output = self.first_dropout(output)
output = self.summary(output)
output = self.activation(output)
output = self.last_dropout(output)
return output
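# Illustrative usage sketch (hypothetical config, not part of the original module): with
# summary_type = 'cls_index', hidden_states of shape (bsz, seq_len, hidden_size) and cls_index of
# shape (bsz,), the module gathers one hidden vector per example, applies the optional dropout,
# projection and activation, and returns a (bsz, num_classes or hidden_size) summary vector.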
def create_position_ids_from_input_ids(input_ids, padding_idx):
""" Replace non-padding symbols with their position numbers. Position numbers begin at
padding_idx+1. Padding symbols are ignored. This is modified from fairseq's
`utils.make_positions`.
:param torch.Tensor input_ids:
:return torch.Tensor:
"""
# The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
mask = input_ids.ne(padding_idx).int()
incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask
return incremental_indices.long() + padding_idx
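# Illustrative sketch (hypothetical ids, padding_idx = 1, not part of the original module):
#   input_ids     = [[0, 5, 7, 1, 1]]  ->  mask         = [[1, 1, 1, 0, 0]]
#   cumsum * mask = [[1, 2, 3, 0, 0]]  ->  position ids = [[2, 3, 4, 1, 1]]
# real tokens count upward from padding_idx + 1 while padding positions stay at padding_idx.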
def prune_linear_layer(layer, index, dim=0):
""" Prune a linear layer (a model parameters) to keep only entries in index.
Return the pruned layer as a new layer with requires_grad=True.
Used to remove heads.
"""
index = index.to(layer.weight.device)
W = layer.weight.index_select(dim, index).clone().detach()
if layer.bias is not None:
if dim == 1:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
if layer.bias is not None:
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
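# Illustrative sketch (hypothetical shapes, not part of the original module): pruning
# nn.Linear(768, 768) along dim=0 with an index of 512 kept rows returns a fresh nn.Linear(768, 512)
# whose weight rows and bias entries are copied from the kept indices, e.g.
#   keep = torch.arange(512)
#   pruned = prune_linear_layer(nn.Linear(768, 768), keep, dim=0)  # pruned.weight: (512, 768)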
def prune_conv1d_layer(layer, index, dim=1):
""" Prune a Conv1D layer (a model parameters) to keep only entries in index.
A Conv1D work as a Linear layer (see e.g. BERT) but the weights are transposed.
Return the pruned layer as a new layer with requires_grad=True.
Used to remove heads.
"""
index = index.to(layer.weight.device)
W = layer.weight.index_select(dim, index).clone().detach()
if dim == 0:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = Conv1D(new_size[1], new_size[0]).to(layer.weight.device)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
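# Note (added clarification): Conv1D stores its weight as (nx, nf), i.e. transposed relative to
# nn.Linear, so the default pruning dimension here is 1 (the output features) and the pruned layer
# is rebuilt as Conv1D(new_size[1], new_size[0]) to respect the (nf, nx) constructor order.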
def prune_layer(layer, index, dim=None):
""" Prune a Conv1D or nn.Linear layer (a model parameters) to keep only entries in index.
Return the pruned layer as a new layer with requires_grad=True.
Used to remove heads.
"""
if isinstance(layer, nn.Linear):
return prune_linear_layer(layer, index, dim=0 if dim is None else dim)
elif isinstance(layer, Conv1D):
return prune_conv1d_layer(layer, index, dim=1 if dim is None else dim)
else:
raise ValueError("Can't prune layer of class {}".format(layer.__class__)) | [
"torch.nn.Linear",
"torch.cat",
"torch.stack",
"torch.einsum",
"torch.nn.Parameter",
"torch.multinomial",
"torch.load",
"torch.nn.BCEWithLogitsLoss",
"torch.nn.functional.pad",
"torch.nn.CrossEntropyLoss",
"torch.topk",
"torch.nn.LayerNorm",
"torch.gather",
"torch.nn.init.normal_",
"torch.tensor",
"torch.empty",
"torch.zeros",
"torch.nn.Identity",
"torch.nn.Tanh",
"torch.nn.functional.log_softmax",
"torch.full_like",
"torch.nn.functional.softmax",
"torch.sort",
"torch.argmax",
"torch.cumsum",
"torch.nn.Dropout",
"torch.arange",
"torch.no_grad",
"torch.nn.Embedding"
] | 1.4.0 | HebatallaTarek/Empathy-Mental-Health | 16e2a5f93aabd22803bb39805f8e76c8bea0ccf2 |
1.0 | # coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Trainer class, to easily train a 🤗 Transformers from scratch or finetune it on a new task.
"""
import collections
import inspect
import math
import os
import random
import re
import shutil
import sys
import tempfile
import time
import warnings
from logging import StreamHandler
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
from tqdm.auto import tqdm
# Integrations must be imported before ML frameworks:
from .integrations import ( # isort: split
default_hp_search_backend,
get_reporting_integration_callbacks,
hp_params,
is_fairscale_available,
is_optuna_available,
is_ray_tune_available,
run_hp_search_optuna,
run_hp_search_ray,
deepspeed_init,
is_deepspeed_zero3_enabled,
)
import numpy as np
import torch
from packaging import version
from torch import nn
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset, IterableDataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler, SequentialSampler
from . import __version__
from .configuration_utils import PretrainedConfig
from .data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator
from .debug_utils import DebugOption, DebugUnderflowOverflow
from .dependency_versions_check import dep_version_check
from .file_utils import (
CONFIG_NAME,
WEIGHTS_NAME,
PushToHubMixin,
is_apex_available,
is_datasets_available,
is_in_notebook,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_torch_tpu_available,
is_training_run_on_sagemaker,
)
from .modelcard import TrainingSummary
from .modeling_utils import PreTrainedModel, unwrap_model
from .optimization import Adafactor, AdamW, get_scheduler
from .tokenization_utils_base import PreTrainedTokenizerBase
from .trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
from .trainer_pt_utils import (
DistributedLengthGroupedSampler,
DistributedSamplerWithLoop,
DistributedTensorGatherer,
IterableDatasetShard,
LabelSmoother,
LengthGroupedSampler,
SequentialDistributedSampler,
ShardSampler,
distributed_broadcast_scalars,
distributed_concat,
find_batch_size,
get_parameter_names,
nested_concat,
nested_detach,
nested_numpify,
nested_truncate,
nested_xla_mesh_reduce,
reissue_pt_warnings,
)
from .trainer_utils import (
PREFIX_CHECKPOINT_DIR,
BestRun,
EvalLoopOutput,
EvalPrediction,
HPSearchBackend,
PredictionOutput,
ShardedDDPOption,
TrainerMemoryTracker,
TrainOutput,
default_compute_objective,
default_hp_space,
denumpify_detensorize,
get_last_checkpoint,
set_seed,
speed_metrics,
)
from .training_args import ParallelMode, TrainingArguments
from .utils import logging
from .utils.modeling_auto_mapping import MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
_is_torch_generator_available = False
_is_native_amp_available = False
DEFAULT_CALLBACKS = [DefaultFlowCallback]
DEFAULT_PROGRESS_CALLBACK = ProgressCallback
if is_in_notebook():
from .utils.notebook import NotebookProgressCallback
DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback
if is_apex_available():
from apex import amp
if version.parse(torch.__version__) >= version.parse("1.6"):
_is_torch_generator_available = True
_is_native_amp_available = True
from torch.cuda.amp import autocast
if is_datasets_available():
import datasets
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
import torch_xla.distributed.parallel_loader as pl
if is_fairscale_available():
dep_version_check("fairscale")
import fairscale
from fairscale.nn.data_parallel import FullyShardedDataParallel as FullyShardedDDP
from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP
from fairscale.nn.wrap import auto_wrap
from fairscale.optim import OSS
from fairscale.optim.grad_scaler import ShardedGradScaler
if is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.distributed as dist
from smdistributed.dataparallel.torch.parallel.distributed import DistributedDataParallel as DDP
else:
import torch.distributed as dist
if is_sagemaker_mp_enabled():
import smdistributed.modelparallel.torch as smp
from .trainer_pt_utils import smp_forward_backward, smp_forward_only, smp_gather, smp_nested_concat
if is_training_run_on_sagemaker():
logging.add_handler(StreamHandler(sys.stdout))
if TYPE_CHECKING:
import optuna
logger = logging.get_logger(__name__)
class Trainer:
"""
Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers.
Args:
model (:class:`~transformers.PreTrainedModel` or :obj:`torch.nn.Module`, `optional`):
The model to train, evaluate or use for predictions. If not provided, a ``model_init`` must be passed.
.. note::
:class:`~transformers.Trainer` is optimized to work with the :class:`~transformers.PreTrainedModel`
provided by the library. You can still use your own models defined as :obj:`torch.nn.Module` as long as
they work the same way as the 🤗 Transformers models.
args (:class:`~transformers.TrainingArguments`, `optional`):
The arguments to tweak for training. Will default to a basic instance of
:class:`~transformers.TrainingArguments` with the ``output_dir`` set to a directory named `tmp_trainer` in
the current directory if not provided.
data_collator (:obj:`DataCollator`, `optional`):
The function to use to form a batch from a list of elements of :obj:`train_dataset` or :obj:`eval_dataset`.
Will default to :func:`~transformers.default_data_collator` if no ``tokenizer`` is provided, an instance of
:func:`~transformers.DataCollatorWithPadding` otherwise.
train_dataset (:obj:`torch.utils.data.dataset.Dataset` or :obj:`torch.utils.data.dataset.IterableDataset`, `optional`):
The dataset to use for training. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
Note that if it's a :obj:`torch.utils.data.dataset.IterableDataset` with some randomization and you are
training in a distributed fashion, your iterable dataset should either use an internal attribute
:obj:`generator` that is a :obj:`torch.Generator` for the randomization that must be identical on all
processes (and the Trainer will manually set the seed of this :obj:`generator` at each epoch) or have a
:obj:`set_epoch()` method that internally sets the seed of the RNGs used.
eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The dataset to use for evaluation. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
tokenizer (:class:`PreTrainedTokenizerBase`, `optional`):
The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs to the
maximum length when batching inputs, and it will be saved along with the model to make it easier to rerun an
interrupted training or reuse the fine-tuned model.
model_init (:obj:`Callable[[], PreTrainedModel]`, `optional`):
A function that instantiates the model to be used. If provided, each call to
:meth:`~transformers.Trainer.train` will start from a new instance of the model as given by this function.
The function may have zero arguments, or a single one containing the optuna/Ray Tune trial object, to be
able to choose different architectures according to hyperparameters (such as layer count, sizes of inner
layers, dropout probabilities, etc.).
compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
The function that will be used to compute metrics at evaluation. Must take a
:class:`~transformers.EvalPrediction` and return a dictionary mapping metric names to metric values.
callbacks (List of :obj:`~transformers.TrainerCallback`, `optional`):
A list of callbacks to customize the training loop. Will add those to the list of default callbacks
detailed in :doc:`here <callback>`.
If you want to remove one of the default callbacks used, use the :meth:`Trainer.remove_callback` method.
optimizers (:obj:`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, `optional`): A tuple
containing the optimizer and the scheduler to use. Will default to an instance of
:class:`~transformers.AdamW` on your model and a scheduler given by
:func:`~transformers.get_linear_schedule_with_warmup` controlled by :obj:`args`.
Important attributes:
- **model** -- Always points to the core model. If using a transformers model, it will be a
:class:`~transformers.PreTrainedModel` subclass.
- **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the
original model. This is the model that should be used for the forward pass. For example, under ``DeepSpeed``,
the inner model is wrapped in ``DeepSpeed`` and then again in ``torch.nn.DistributedDataParallel``. If the
inner model hasn't been wrapped, then ``self.model_wrapped`` is the same as ``self.model``.
- **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from
data parallelism, this means some of the model layers are split on different GPUs).
- **place_model_on_device** -- Whether or not to automatically place the model on the device - it will be set
to :obj:`False` if model parallel or deepspeed is used, or if the default
``TrainingArguments.place_model_on_device`` is overridden to return :obj:`False`.
- **is_in_train** -- Whether or not a model is currently running ``train`` (e.g. when ``evaluate`` is called
while in ``train``)
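Example (a hedged sketch of typical usage; the model checkpoint name and the ``train_ds``/``eval_ds`` datasets are illustrative assumptions, not part of this file)::
    >>> from transformers import AutoModelForSequenceClassification, TrainingArguments, Trainer
    >>> model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)
    >>> args = TrainingArguments(output_dir="tmp_trainer", num_train_epochs=1)
    >>> trainer = Trainer(model=model, args=args, train_dataset=train_ds, eval_dataset=eval_ds)
    >>> trainer.train()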
"""
from .trainer_pt_utils import _get_learning_rate, log_metrics, metrics_format, save_metrics, save_state
def __init__(
self,
model: Union[PreTrainedModel, torch.nn.Module] = None,
args: TrainingArguments = None,
data_collator: Optional[DataCollator] = None,
train_dataset: Optional[Dataset] = None,
eval_dataset: Optional[Dataset] = None,
tokenizer: Optional[PreTrainedTokenizerBase] = None,
model_init: Callable[[], PreTrainedModel] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
callbacks: Optional[List[TrainerCallback]] = None,
optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
):
if args is None:
output_dir = "tmp_trainer"
logger.info(f"No `TrainingArguments` passed, using `output_dir={output_dir}`.")
args = TrainingArguments(output_dir=output_dir)
self.args = args
# Seed must be set before instantiating the model when using model_init.
set_seed(self.args.seed)
self.hp_name = None
self.deepspeed = None
self.is_in_train = False
# memory metrics - must set up as early as possible
self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
self._memory_tracker.start()
# force device and distributed setup init explicitly
args._setup_devices
if model is None:
if model_init is not None:
self.model_init = model_init
model = self.call_model_init()
else:
raise RuntimeError("`Trainer` requires either a `model` or `model_init` argument")
else:
if model_init is not None:
warnings.warn(
"`Trainer` requires either a `model` or `model_init` argument, but not both. "
"`model_init` will overwrite your model when calling the `train` method. This will become a fatal error in the next release.",
FutureWarning,
)
self.model_init = model_init
if hasattr(model, "is_parallelizable") and model.is_parallelizable and model.model_parallel:
self.is_model_parallel = True
else:
self.is_model_parallel = False
# Setup Sharded DDP training
self.sharded_ddp = None
if len(args.sharded_ddp) > 0:
if args.deepspeed:
raise ValueError(
"Using --sharded_ddp xxx together with --deepspeed is not possible, deactivate one of those flags."
)
if args.local_rank == -1:
raise ValueError("Using sharded DDP only works in distributed training.")
elif not is_fairscale_available():
raise ImportError("Sharded DDP training requires fairscale: `pip install fairscale`.")
elif ShardedDDPOption.SIMPLE not in args.sharded_ddp and FullyShardedDDP is None:
raise ImportError(
"Sharded DDP in a mode other than simple training requires fairscale version >= 0.3, found "
f"{fairscale.__version__}. Upgrade your fairscale library: `pip install --upgrade fairscale`."
)
elif ShardedDDPOption.SIMPLE in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.SIMPLE
elif ShardedDDPOption.ZERO_DP_2 in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.ZERO_DP_2
elif ShardedDDPOption.ZERO_DP_3 in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.ZERO_DP_3
# one place to sort out whether to place the model on device or not
# postpone switching model to cuda when:
# 1. MP - since we are trying to fit a much bigger than 1 gpu model
# 2. fp16-enabled DeepSpeed loads the model in half the size and it doesn't need .to() anyway,
# and we only use deepspeed for training at the moment
# 3. full fp16 eval - since the model needs to be half'ed first
# 4. Sharded DDP - same as MP
self.place_model_on_device = args.place_model_on_device
if (
self.is_model_parallel
or args.deepspeed
or (args.fp16_full_eval and not args.do_train)
or (self.sharded_ddp in [ShardedDDPOption.ZERO_DP_2, ShardedDDPOption.ZERO_DP_3])
):
self.place_model_on_device = False
default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer)
self.data_collator = data_collator if data_collator is not None else default_collator
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.tokenizer = tokenizer
if self.place_model_on_device:
model = model.to(args.device)
# Force n_gpu to 1 to avoid DataParallel as MP will manage the GPUs
if self.is_model_parallel:
self.args._n_gpu = 1
# later use `self.model is self.model_wrapped` to check if it's wrapped or not
self.model_wrapped = model
self.model = model
self.compute_metrics = compute_metrics
self.optimizer, self.lr_scheduler = optimizers
if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None):
raise RuntimeError(
"Passing a `model_init` is incompatible with providing the `optimizers` argument."
"You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
)
default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)
callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks
self.callback_handler = CallbackHandler(
callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler
)
self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)
# Will be set to True by `self._setup_loggers()` on first call to `self.log()`.
self._loggers_initialized = False
# Create output directory if needed
if self.is_world_process_zero():
os.makedirs(self.args.output_dir, exist_ok=True)
if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)):
raise ValueError("The `data_collator` should be a simple callable (function, class with `__call__`).")
if args.max_steps > 0:
logger.info("max_steps is given, it will override any value given in num_train_epochs")
if train_dataset is not None and not isinstance(train_dataset, collections.abc.Sized) and args.max_steps <= 0:
raise ValueError("train_dataset does not implement __len__, max_steps has to be specified")
self._signature_columns = None
# Mixed precision setup
self.use_apex = False
self.use_amp = False
self.fp16_backend = None
if args.fp16:
if args.fp16_backend == "auto":
self.fp16_backend = "amp" if _is_native_amp_available else "apex"
else:
self.fp16_backend = args.fp16_backend
logger.info(f"Using {self.fp16_backend} fp16 backend")
if args.fp16 and not args.deepspeed: # deepspeed manages its own fp16
if self.fp16_backend == "amp":
self.use_amp = True
if is_sagemaker_mp_enabled():
self.scaler = smp.amp.GradScaler()
elif self.sharded_ddp is not None:
self.scaler = ShardedGradScaler()
else:
self.scaler = torch.cuda.amp.GradScaler()
else:
if not is_apex_available():
raise ImportError(
"Using FP16 with APEX but APEX is not installed, please refer to https://www.github.com/nvidia/apex."
)
self.use_apex = True
# FP16 + model parallelism in SageMaker: gradient clipping does not work for now so we raise a helpful error.
if is_sagemaker_mp_enabled() and self.use_amp and args.max_grad_norm is not None and args.max_grad_norm > 0:
raise ValueError(
"SageMaker Model Parallelism in mixed precision mode does not support gradient clipping yet. Pass "
"along 'max_grad_norm': 0 in your hyperparameters."
)
# Label smoothing
if self.args.label_smoothing_factor != 0:
self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor)
else:
self.label_smoother = None
self.state = TrainerState()
self.control = TrainerControl()
# Internal variable to count flos in each process, will be accumulated in `self.state.total_flos` then
# returned to 0 every time flos need to be logged
self.current_flos = 0
self.hp_search_backend = None
self.use_tune_checkpoints = False
default_label_names = (
["start_positions", "end_positions"]
if type(self.model).__name__ in MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES.values()
else ["labels"]
)
self.label_names = default_label_names if self.args.label_names is None else self.args.label_names
self.control = self.callback_handler.on_init_end(self.args, self.state, self.control)
# very last
self._memory_tracker.stop_and_update_metrics()
def add_callback(self, callback):
"""
Add a callback to the current list of :class:`~transformers.TrainerCallback`.
Args:
callback (:obj:`type` or :class:`~transformers.TrainerCallback`):
A :class:`~transformers.TrainerCallback` class or an instance of a :class:`~transformers.TrainerCallback`.
In the first case, will instantiate a member of that class.
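Example (illustrative; assumes an existing ``trainer`` instance)::
    >>> from transformers import PrinterCallback
    >>> trainer.add_callback(PrinterCallback)    # pass the class, it will be instantiated
    >>> trainer.add_callback(PrinterCallback())  # or pass an instance directly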
"""
self.callback_handler.add_callback(callback)
def pop_callback(self, callback):
"""
Remove a callback from the current list of :class:`~transformers.TrainerCallback` and return it.
If the callback is not found, returns :obj:`None` (and no error is raised).
Args:
callback (:obj:`type` or :class:`~transformers.TrainerCallback`):
A :class:`~transformers.TrainerCallback` class or an instance of a :class:`~transformers.TrainerCallback`.
In the first case, will pop the first member of that class found in the list of callbacks.
Returns:
:class:`~transformers.TrainerCallback`: The callback removed, if found.
"""
return self.callback_handler.pop_callback(callback)
def remove_callback(self, callback):
"""
Remove a callback from the current list of :class:`~transformers.TrainerCallback`.
Args:
callback (:obj:`type` or :class:`~transformers.TrainerCallback`):
A :class:`~transformers.TrainerCallback` class or an instance of a :class:`~transformers.TrainerCallback`.
In the first case, will remove the first member of that class found in the list of callbacks.
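Example (illustrative; assumes an existing ``trainer`` instance)::
    >>> from transformers import PrinterCallback
    >>> trainer.remove_callback(PrinterCallback)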
"""
self.callback_handler.remove_callback(callback)
def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None):
if not self.args.remove_unused_columns:
return dataset
if self._signature_columns is None:
# Inspect model forward signature to keep only the arguments it accepts.
signature = inspect.signature(self.model.forward)
self._signature_columns = list(signature.parameters.keys())
# Labels may be named label or label_ids, the default data collator handles that.
self._signature_columns += ["label", "label_ids"]
columns = [k for k in self._signature_columns if k in dataset.column_names]
ignored_columns = list(set(dataset.column_names) - set(self._signature_columns))
if len(ignored_columns) > 0:
dset_description = "" if description is None else f"in the {description} set "
logger.info(
f"The following columns {dset_description} don't have a corresponding argument in "
f"`{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}."
)
if version.parse(datasets.__version__) < version.parse("1.4.0"):
dataset.set_format(
type=dataset.format["type"], columns=columns, format_kwargs=dataset.format["format_kwargs"]
)
return dataset
else:
return dataset.remove_columns(ignored_columns)
def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]:
if not isinstance(self.train_dataset, collections.abc.Sized):
return None
generator = None
if self.args.world_size <= 1 and _is_torch_generator_available:
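# Seed a per-dataloader generator from torch's global RNG (already seeded via `set_seed` in __init__)
# so that shuffling stays reproducible for a given `args.seed`.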
generator = torch.Generator()
generator.manual_seed(int(torch.empty((), dtype=torch.int64).random_().item()))
# Build the sampler.
if self.args.group_by_length:
if is_datasets_available() and isinstance(self.train_dataset, datasets.Dataset):
lengths = (
self.train_dataset[self.args.length_column_name]
if self.args.length_column_name in self.train_dataset.column_names
else None
)
else:
lengths = None
model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not None else None
if self.args.world_size <= 1:
return LengthGroupedSampler(
self.train_dataset,
self.args.train_batch_size,
lengths=lengths,
model_input_name=model_input_name,
generator=generator,
)
else:
return DistributedLengthGroupedSampler(
self.train_dataset,
self.args.train_batch_size,
num_replicas=self.args.world_size,
rank=self.args.process_index,
lengths=lengths,
model_input_name=model_input_name,
seed=self.args.seed,
)
else:
if self.args.world_size <= 1:
if _is_torch_generator_available:
return RandomSampler(self.train_dataset, generator=generator)
return RandomSampler(self.train_dataset)
elif (
self.args.parallel_mode in [ParallelMode.TPU, ParallelMode.SAGEMAKER_MODEL_PARALLEL]
and not self.args.dataloader_drop_last
):
# Use a loop for TPUs when drop_last is False to have all batches have the same size.
return DistributedSamplerWithLoop(
self.train_dataset,
batch_size=self.args.per_device_train_batch_size,
num_replicas=self.args.world_size,
rank=self.args.process_index,
seed=self.args.seed,
)
else:
return DistributedSampler(
self.train_dataset,
num_replicas=self.args.world_size,
rank=self.args.process_index,
seed=self.args.seed,
)
def get_train_dataloader(self) -> DataLoader:
"""
Returns the training :class:`~torch.utils.data.DataLoader`.
Will use no sampler if :obj:`self.train_dataset` does not implement :obj:`__len__`, a random sampler (adapted
to distributed training if necessary) otherwise.
Subclass and override this method if you want to inject some custom behavior.
"""
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
train_dataset = self.train_dataset
if is_datasets_available() and isinstance(train_dataset, datasets.Dataset):
train_dataset = self._remove_unused_columns(train_dataset, description="training")
if isinstance(train_dataset, torch.utils.data.dataset.IterableDataset):
if self.args.world_size > 1:
train_dataset = IterableDatasetShard(
train_dataset,
batch_size=self.args.train_batch_size,
drop_last=self.args.dataloader_drop_last,
num_processes=self.args.world_size,
process_index=self.args.process_index,
)
return DataLoader(
train_dataset,
batch_size=self.args.train_batch_size,
collate_fn=self.data_collator,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
train_sampler = self._get_train_sampler()
return DataLoader(
train_dataset,
batch_size=self.args.train_batch_size,
sampler=train_sampler,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.sampler.Sampler]:
# Deprecated code
if self.args.use_legacy_prediction_loop:
if is_torch_tpu_available():
return SequentialDistributedSampler(
eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal()
)
elif is_sagemaker_mp_enabled():
return SequentialDistributedSampler(
eval_dataset,
num_replicas=smp.dp_size(),
rank=smp.dp_rank(),
batch_size=self.args.per_device_eval_batch_size,
)
elif self.args.local_rank != -1:
return SequentialDistributedSampler(eval_dataset)
else:
return SequentialSampler(eval_dataset)
if self.args.world_size <= 1:
return SequentialSampler(eval_dataset)
else:
return ShardSampler(
eval_dataset,
batch_size=self.args.per_device_eval_batch_size,
num_processes=self.args.world_size,
process_index=self.args.process_index,
)
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
"""
Returns the evaluation :class:`~torch.utils.data.DataLoader`.
Subclass and override this method if you want to inject some custom behavior.
Args:
eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
If provided, will override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`, columns not
accepted by the ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
"""
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
if is_datasets_available() and isinstance(eval_dataset, datasets.Dataset):
eval_dataset = self._remove_unused_columns(eval_dataset, description="evaluation")
if isinstance(eval_dataset, torch.utils.data.dataset.IterableDataset):
if self.args.world_size > 1:
eval_dataset = IterableDatasetShard(
eval_dataset,
batch_size=self.args.eval_batch_size,
drop_last=self.args.dataloader_drop_last,
num_processes=self.args.world_size,
process_index=self.args.process_index,
)
return DataLoader(
eval_dataset,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
eval_sampler = self._get_eval_sampler(eval_dataset)
return DataLoader(
eval_dataset,
sampler=eval_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
"""
Returns the test :class:`~torch.utils.data.DataLoader`.
Subclass and override this method if you want to inject some custom behavior.
Args:
test_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The test dataset to use. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
"""
if is_datasets_available() and isinstance(test_dataset, datasets.Dataset):
test_dataset = self._remove_unused_columns(test_dataset, description="test")
if isinstance(test_dataset, torch.utils.data.dataset.IterableDataset):
if self.args.world_size > 1:
test_dataset = IterableDatasetShard(
test_dataset,
batch_size=self.args.eval_batch_size,
drop_last=self.args.dataloader_drop_last,
num_processes=self.args.world_size,
process_index=self.args.process_index,
)
return DataLoader(
test_dataset,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
test_sampler = self._get_eval_sampler(test_dataset)
# We use the same batch_size as for eval.
return DataLoader(
test_dataset,
sampler=test_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
pin_memory=self.args.dataloader_pin_memory,
)
def create_optimizer_and_scheduler(self, num_training_steps: int):
"""
Setup the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
Trainer's init through :obj:`optimizers`, or subclass and override this method (or :obj:`create_optimizer`
and/or :obj:`create_scheduler`).
"""
self.create_optimizer()
self.create_scheduler(num_training_steps)
def create_optimizer(self):
"""
Setup the optimizer.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
Trainer's init through :obj:`optimizers`, or subclass and override this method.
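Example (a hedged sketch of overriding in a subclass; ``MyTrainer`` and the plain SGD choice are illustrative assumptions)::
    >>> import torch
    >>> class MyTrainer(Trainer):
    ...     def create_optimizer(self):
    ...         if self.optimizer is None:
    ...             self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.args.learning_rate)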
"""
if self.optimizer is None:
decay_parameters = get_parameter_names(self.model, [torch.nn.LayerNorm])
decay_parameters = [name for name in decay_parameters if "bias" not in name]
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.model.named_parameters() if n in decay_parameters],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if n not in decay_parameters],
"weight_decay": 0.0,
},
]
if self.args.adafactor:
optimizer_cls = Adafactor
optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
else:
optimizer_cls = AdamW
optimizer_kwargs = {
"betas": (self.args.adam_beta1, self.args.adam_beta2),
"eps": self.args.adam_epsilon,
}
optimizer_kwargs["lr"] = self.args.learning_rate
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
self.optimizer = OSS(
params=optimizer_grouped_parameters,
optim=optimizer_cls,
**optimizer_kwargs,
)
else:
self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
if is_sagemaker_mp_enabled():
self.optimizer = smp.DistributedOptimizer(self.optimizer)
def create_scheduler(self, num_training_steps: int):
"""
Setup the scheduler. The optimizer of the trainer must have been set up before this method is called.
Args:
num_training_steps (int): The number of training steps to do.
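Example (an illustrative sketch of the warmup computation used below when ``args.warmup_steps`` is 0; the numbers are made up)::
    >>> import math
    >>> num_training_steps, warmup_ratio = 1000, 0.25
    >>> math.ceil(num_training_steps * warmup_ratio)
    250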
"""
if self.lr_scheduler is None:
warmup_steps = (
self.args.warmup_steps
if self.args.warmup_steps > 0
else math.ceil(num_training_steps * self.args.warmup_ratio)
)
self.lr_scheduler = get_scheduler(
self.args.lr_scheduler_type,
self.optimizer,
num_warmup_steps=warmup_steps,
num_training_steps=num_training_steps,
)
def num_examples(self, dataloader: DataLoader) -> int:
"""
Helper to get number of samples in a :class:`~torch.utils.data.DataLoader` by accessing its dataset.
Will raise an exception if the underlying dataset does not implement the :obj:`__len__` method.
"""
return len(dataloader.dataset)
def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]):
"""HP search setup code"""
self._trial = trial
if self.hp_search_backend is None or trial is None:
return
if self.hp_search_backend == HPSearchBackend.OPTUNA:
params = self.hp_space(trial)
elif self.hp_search_backend == HPSearchBackend.RAY:
params = trial
params.pop("wandb", None)
for key, value in params.items():
if not hasattr(self.args, key):
raise AttributeError(
f"Trying to set {key} in the hyperparameter search but there is no corresponding field in `TrainingArguments`."
)
old_attr = getattr(self.args, key, None)
# Casting value to the proper type
if old_attr is not None:
value = type(old_attr)(value)
setattr(self.args, key, value)
if self.hp_search_backend == HPSearchBackend.OPTUNA:
logger.info("Trial:", trial.params)
if self.args.deepspeed:
# Rebuild the deepspeed config to reflect the updated training parameters
from transformers.integrations import DeepSpeedConfigHF
self.args.deepspeed_config_hf = DeepSpeedConfigHF(self.args)
def _report_to_hp_search(
self, trial: Union["optuna.Trial", Dict[str, Any]], epoch: int, metrics: Dict[str, float]
):
if self.hp_search_backend is None or trial is None:
return
self.objective = self.compute_objective(metrics.copy())
if self.hp_search_backend == HPSearchBackend.OPTUNA:
import optuna
trial.report(self.objective, epoch)
if trial.should_prune():
raise optuna.TrialPruned()
elif self.hp_search_backend == HPSearchBackend.RAY:
from ray import tune
if self.control.should_save:
self._tune_save_checkpoint()
tune.report(objective=self.objective, **metrics)
def _tune_save_checkpoint(self):
from ray import tune
if not self.use_tune_checkpoints:
return
with tune.checkpoint_dir(step=self.state.global_step) as checkpoint_dir:
output_dir = os.path.join(checkpoint_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}")
self.save_model(output_dir)
if self.is_world_process_zero():
self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
def call_model_init(self, trial=None):
model_init_argcount = len(inspect.signature(self.model_init).parameters)
if model_init_argcount == 0:
model = self.model_init()
elif model_init_argcount == 1:
model = self.model_init(trial)
else:
raise RuntimeError("model_init should have 0 or 1 argument.")
if model is None:
raise RuntimeError("model_init should not return None.")
return model
def _wrap_model(self, model, training=True):
if is_sagemaker_mp_enabled():
# Wrapping the base model twice in a DistributedModel will raise an error.
if isinstance(self.model_wrapped, smp.model.DistributedModel):
return self.model_wrapped
return smp.DistributedModel(model, backward_passes_per_step=self.args.gradient_accumulation_steps)
# already initialized its own DDP and AMP
if self.deepspeed:
return self.deepspeed
# train/eval could be run multiple-times - if already wrapped, don't re-wrap it again
if unwrap_model(model) is not model:
return model
# Mixed precision training with apex (torch < 1.6)
if self.use_apex and training:
model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level)
# Multi-gpu training (should be after apex fp16 initialization)
if self.args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Note: in torch.distributed mode, there's no point in wrapping the model
# inside a DistributedDataParallel as we'll be under `no_grad` anyways.
if not training:
return model
# Distributed training (should be after apex fp16 initialization)
if self.sharded_ddp is not None:
# Sharded DDP!
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
model = ShardedDDP(model, self.optimizer)
else:
mixed_precision = self.args.fp16
cpu_offload = ShardedDDPOption.OFFLOAD in self.args.sharded_ddp
zero_3 = self.sharded_ddp == ShardedDDPOption.ZERO_DP_3
# XXX: Breaking the self.model convention but I see no way around it for now.
if ShardedDDPOption.AUTO_WRAP in self.args.sharded_ddp:
model = auto_wrap(model)
self.model = model = FullyShardedDDP(
model,
mixed_precision=mixed_precision,
reshard_after_forward=zero_3,
cpu_offload=cpu_offload,
).to(self.args.device)
elif is_sagemaker_dp_enabled():
model = DDP(model, device_ids=[dist.get_local_rank()], broadcast_buffers=False)
elif self.args.local_rank != -1:
if self.args.ddp_find_unused_parameters is not None:
find_unused_parameters = self.args.ddp_find_unused_parameters
elif isinstance(model, PreTrainedModel):
# find_unused_parameters breaks checkpointing as per
# https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021
find_unused_parameters = not getattr(model.config, "gradient_checkpointing", False)
else:
find_unused_parameters = True
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[self.args.local_rank],
output_device=self.args.local_rank,
find_unused_parameters=find_unused_parameters,
)
return model
def train(
self,
resume_from_checkpoint: Optional[Union[str, bool]] = None,
trial: Union["optuna.Trial", Dict[str, Any]] = None,
**kwargs,
):
"""
Main training entry point.
Args:
resume_from_checkpoint (:obj:`str` or :obj:`bool`, `optional`):
If a :obj:`str`, local path to a saved checkpoint as saved by a previous instance of
:class:`~transformers.Trainer`. If a :obj:`bool` and equals `True`, load the last checkpoint in
`args.output_dir` as saved by a previous instance of :class:`~transformers.Trainer`. If present,
training will resume from the model/optimizer/scheduler states loaded here.
trial (:obj:`optuna.Trial` or :obj:`Dict[str, Any]`, `optional`):
The trial run or the hyperparameter dictionary for hyperparameter search.
kwargs:
Additional keyword arguments used to hide deprecated arguments
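Example (illustrative; assumes a configured ``trainer`` instance)::
    >>> trainer.train()
    >>> # resume from the most recent checkpoint written to ``args.output_dir``
    >>> trainer.train(resume_from_checkpoint=True)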
"""
# memory metrics - must set up as early as possible
self._memory_tracker.start()
args = self.args
self.is_in_train = True
# do_train is not a reliable argument, as it might not be set and .train() still called, so
# the following is a workaround:
if args.fp16_full_eval and not args.do_train:
self.model = self.model.to(args.device)
if "model_path" in kwargs:
resume_from_checkpoint = kwargs.pop("model_path")
warnings.warn(
"`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` "
"instead.",
FutureWarning,
)
if len(kwargs) > 0:
raise TypeError(f"train() received got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.")
# This might change the seed so needs to run first.
self._hp_search_setup(trial)
# Model re-init
model_reloaded = False
if self.model_init is not None:
# Seed must be set before instantiating the model when using model_init.
set_seed(args.seed)
self.model = self.call_model_init(trial)
model_reloaded = True
# Reinitializes optimizer and scheduler
self.optimizer, self.lr_scheduler = None, None
# Load potential model checkpoint
if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint:
resume_from_checkpoint = get_last_checkpoint(args.output_dir)
if resume_from_checkpoint is None:
raise ValueError(f"No valid checkpoint found in output directory ({args.output_dir})")
if resume_from_checkpoint is not None:
if not os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)):
raise ValueError(f"Can't find a valid checkpoint at {resume_from_checkpoint}")
logger.info(f"Loading model from {resume_from_checkpoint}).")
if os.path.isfile(os.path.join(resume_from_checkpoint, CONFIG_NAME)):
config = PretrainedConfig.from_json_file(os.path.join(resume_from_checkpoint, CONFIG_NAME))
checkpoint_version = config.transformers_version
if checkpoint_version is not None and checkpoint_version != __version__:
logger.warn(
f"You are resuming training from a checkpoint trained with {checkpoint_version} of "
f"Transformers but your current version is {__version__}. This is not recommended and could "
"yield to errors or unwanted behaviors."
)
if args.deepspeed:
# will be resumed in deepspeed_init
pass
else:
# We load the model state dict on the CPU to avoid an OOM error.
state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME), map_location="cpu")
# If the model is on the GPU, it still works!
self._load_state_dict_in_model(state_dict)
# If model was re-initialized, put it on the right device and update self.model_wrapped
if model_reloaded:
if self.place_model_on_device:
self.model = self.model.to(args.device)
self.model_wrapped = self.model
# Keep track of whether we can call len() on the dataset or not
train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized)
# Data loader and number of training steps
train_dataloader = self.get_train_dataloader()
# Setting up training control variables:
# number of training epochs: num_train_epochs
# number of training steps per epoch: num_update_steps_per_epoch
# total number of training steps to execute: max_steps
total_train_batch_size = args.train_batch_size * args.gradient_accumulation_steps * args.world_size
if train_dataset_is_sized:
num_update_steps_per_epoch = len(train_dataloader) // args.gradient_accumulation_steps
num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
if args.max_steps > 0:
max_steps = args.max_steps
num_train_epochs = args.max_steps // num_update_steps_per_epoch + int(
args.max_steps % num_update_steps_per_epoch > 0
)
# May be slightly incorrect if the last batch in the training dataloader has a smaller size but it's
# the best we can do.
num_train_samples = args.max_steps * total_train_batch_size
else:
max_steps = math.ceil(args.num_train_epochs * num_update_steps_per_epoch)
num_train_epochs = math.ceil(args.num_train_epochs)
num_train_samples = len(self.train_dataset) * args.num_train_epochs
else:
# see __init__. max_steps is set when the dataset has no __len__
max_steps = args.max_steps
num_train_epochs = int(args.num_train_epochs)
num_update_steps_per_epoch = max_steps
num_train_samples = args.max_steps * total_train_batch_size
if DebugOption.UNDERFLOW_OVERFLOW in self.args.debug:
debug_overflow = DebugUnderflowOverflow(self.model) # noqa
delay_optimizer_creation = self.sharded_ddp is not None and self.sharded_ddp != ShardedDDPOption.SIMPLE
if args.deepspeed:
deepspeed_engine, optimizer, lr_scheduler = deepspeed_init(
self, num_training_steps=max_steps, resume_from_checkpoint=resume_from_checkpoint
)
self.model = deepspeed_engine.module
self.model_wrapped = deepspeed_engine
self.deepspeed = deepspeed_engine
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
elif not delay_optimizer_creation:
self.create_optimizer_and_scheduler(num_training_steps=max_steps)
self.state = TrainerState()
self.state.is_hyper_param_search = trial is not None
model = self._wrap_model(self.model_wrapped)
# for the rest of this function `model` is the outside model, whether it was wrapped or not
if model is not self.model:
self.model_wrapped = model
if delay_optimizer_creation:
self.create_optimizer_and_scheduler(num_training_steps=max_steps)
# Check if saved optimizer or scheduler states exist
self._load_optimizer_and_scheduler(resume_from_checkpoint)
# important: at this point:
# self.model is the Transformers Model
# self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), etc.
# Train!
num_examples = (
self.num_examples(train_dataloader) if train_dataset_is_sized else total_train_batch_size * args.max_steps
)
logger.info("***** Running training *****")
logger.info(f" Num examples = {num_examples}")
logger.info(f" Num Epochs = {num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {max_steps}")
self.state.epoch = 0
start_time = time.time()
epochs_trained = 0
steps_trained_in_current_epoch = 0
steps_trained_progress_bar = None
# Check if continuing training from a checkpoint
if resume_from_checkpoint is not None and os.path.isfile(
os.path.join(resume_from_checkpoint, "trainer_state.json")
):
self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, "trainer_state.json"))
epochs_trained = self.state.global_step // num_update_steps_per_epoch
if not args.ignore_data_skip:
steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)
steps_trained_in_current_epoch *= args.gradient_accumulation_steps
else:
steps_trained_in_current_epoch = 0
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(f" Continuing training from epoch {epochs_trained}")
logger.info(f" Continuing training from global step {self.state.global_step}")
if not args.ignore_data_skip:
logger.info(
f" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} "
"batches in the first epoch. If this takes a lot of time, you can add the `--ignore_data_skip` "
"flag to your launch command, but you will resume the training on data already seen by your model."
)
if self.is_local_process_zero() and not args.disable_tqdm:
steps_trained_progress_bar = tqdm(total=steps_trained_in_current_epoch)
steps_trained_progress_bar.set_description("Skipping the first batches")
# Update the references
self.callback_handler.model = self.model
self.callback_handler.optimizer = self.optimizer
self.callback_handler.lr_scheduler = self.lr_scheduler
self.callback_handler.train_dataloader = train_dataloader
self.state.trial_name = self.hp_name(trial) if self.hp_name is not None else None
self.state.trial_params = hp_params(trial) if trial is not None else None
# This should be the same if the state has been saved but in case the training arguments changed, it's safer
# to set this after the load.
self.state.max_steps = max_steps
self.state.num_train_epochs = num_train_epochs
self.state.is_local_process_zero = self.is_local_process_zero()
self.state.is_world_process_zero = self.is_world_process_zero()
# tr_loss is a tensor to avoid synchronization of TPUs through .item()
tr_loss = torch.tensor(0.0).to(args.device)
# _total_loss_scalar is updated every time .item() has to be called on tr_loss and stores the sum of all losses
self._total_loss_scalar = 0.0
self._globalstep_last_logged = self.state.global_step
model.zero_grad()
self.control = self.callback_handler.on_train_begin(args, self.state, self.control)
# Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.
if not args.ignore_data_skip:
for epoch in range(epochs_trained):
# We just need to begin an iteration to create the randomization of the sampler.
for _ in train_dataloader:
break
for epoch in range(epochs_trained, num_train_epochs):
if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
train_dataloader.sampler.set_epoch(epoch)
elif isinstance(train_dataloader.dataset, IterableDatasetShard):
train_dataloader.dataset.set_epoch(epoch)
if is_torch_tpu_available():
parallel_loader = pl.ParallelLoader(train_dataloader, [args.device]).per_device_loader(args.device)
epoch_iterator = parallel_loader
else:
epoch_iterator = train_dataloader
# Reset the past mems state at the beginning of each epoch if necessary.
if args.past_index >= 0:
self._past = None
steps_in_epoch = (
len(epoch_iterator) if train_dataset_is_sized else args.max_steps * args.gradient_accumulation_steps
)
self.control = self.callback_handler.on_epoch_begin(args, self.state, self.control)
for step, inputs in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
if steps_trained_progress_bar is not None:
steps_trained_progress_bar.update(1)
if steps_trained_in_current_epoch == 0:
self._load_rng_state(resume_from_checkpoint)
continue
elif steps_trained_progress_bar is not None:
steps_trained_progress_bar.close()
steps_trained_progress_bar = None
if step % args.gradient_accumulation_steps == 0:
self.control = self.callback_handler.on_step_begin(args, self.state, self.control)
if (
((step + 1) % args.gradient_accumulation_steps != 0)
and args.local_rank != -1
and args._no_sync_in_gradient_accumulation
):
# Avoid unnecessary DDP synchronization since there will be no backward pass on this example.
with model.no_sync():
tr_loss += self.training_step(model, inputs)
else:
tr_loss += self.training_step(model, inputs)
self.current_flos += float(self.floating_point_ops(inputs))
# Optimizer step for deepspeed must be called on every step regardless of the value of gradient_accumulation_steps
if self.deepspeed:
self.deepspeed.step()
if (step + 1) % args.gradient_accumulation_steps == 0 or (
# last step in epoch but step is always smaller than gradient_accumulation_steps
steps_in_epoch <= args.gradient_accumulation_steps
and (step + 1) == steps_in_epoch
):
# Gradient clipping
if args.max_grad_norm is not None and args.max_grad_norm > 0 and not self.deepspeed:
# deepspeed does its own clipping
if self.use_amp:
# AMP: gradients need unscaling
self.scaler.unscale_(self.optimizer)
if hasattr(self.optimizer, "clip_grad_norm"):
# Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping
self.optimizer.clip_grad_norm(args.max_grad_norm)
elif hasattr(model, "clip_grad_norm_"):
# Some models (like FullyShardedDDP) have a specific way to do gradient clipping
model.clip_grad_norm_(args.max_grad_norm)
else:
# Revert to normal clipping otherwise, handling Apex or full precision
torch.nn.utils.clip_grad_norm_(
amp.master_params(self.optimizer) if self.use_apex else model.parameters(),
args.max_grad_norm,
)
# Optimizer step
optimizer_was_run = True
if self.deepspeed:
pass # called outside the loop
elif is_torch_tpu_available():
xm.optimizer_step(self.optimizer)
elif self.use_amp:
scale_before = self.scaler.get_scale()
self.scaler.step(self.optimizer)
self.scaler.update()
scale_after = self.scaler.get_scale()
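# If the GradScaler lowered the scale, it found inf/NaN gradients and skipped optimizer.step(),
# so the LR scheduler should not be stepped either.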
optimizer_was_run = scale_before <= scale_after
else:
self.optimizer.step()
if optimizer_was_run and not self.deepspeed:
self.lr_scheduler.step()
model.zero_grad()
self.state.global_step += 1
self.state.epoch = epoch + (step + 1) / steps_in_epoch
self.control = self.callback_handler.on_step_end(args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)
if self.control.should_epoch_stop or self.control.should_training_stop:
break
self.control = self.callback_handler.on_epoch_end(args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)
if DebugOption.TPU_METRICS_DEBUG in self.args.debug:
if is_torch_tpu_available():
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
else:
logger.warning(
"You enabled PyTorch/XLA debug metrics but you don't have a TPU "
"configured. Check your training configuration if this is unexpected."
)
if self.control.should_training_stop:
break
if args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
if args.load_best_model_at_end and self.state.best_model_checkpoint is not None:
# Wait for everyone to get here so we are sure the model has been saved by process 0.
if is_torch_tpu_available():
xm.rendezvous("load_best_model_at_end")
elif args.local_rank != -1:
dist.barrier()
logger.info(
f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric})."
)
# We load the model state dict on the CPU to avoid an OOM error.
state_dict = torch.load(os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME), map_location="cpu")
# If the model is on the GPU, it still works!
self._load_state_dict_in_model(state_dict)
if self.deepspeed:
self.deepspeed.load_checkpoint(
self.state.best_model_checkpoint, load_optimizer_states=False, load_lr_scheduler_states=False
)
metrics = speed_metrics("train", start_time, num_samples=num_train_samples, num_steps=self.state.max_steps)
self.store_flos()
metrics["total_flos"] = self.state.total_flos
self.log(metrics)
self.control = self.callback_handler.on_train_end(args, self.state, self.control)
# add remaining tr_loss
self._total_loss_scalar += tr_loss.item()
self.is_in_train = False
self._memory_tracker.stop_and_update_metrics(metrics)
return TrainOutput(self.state.global_step, self._total_loss_scalar / self.state.global_step, metrics)
def _load_state_dict_in_model(self, state_dict):
load_result = self.model.load_state_dict(state_dict, strict=False)
if len(load_result.missing_keys) != 0:
if set(load_result.missing_keys) == set(self.model._keys_to_ignore_on_save):
self.model.tie_weights()
else:
logger.warn(f"There were missing keys in the checkpoint model loaded: {load_result.missing_keys}.")
if len(load_result.unexpected_keys) != 0:
logger.warn(f"There were unexpected keys in the checkpoint model loaded: {load_result.unexpected_keys}.")
def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch):
if self.control.should_log:
logs: Dict[str, float] = {}
tr_loss_scalar = tr_loss.item()
# reset tr_loss to zero
tr_loss -= tr_loss
logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)
logs["learning_rate"] = self._get_learning_rate()
self._total_loss_scalar += tr_loss_scalar
self._globalstep_last_logged = self.state.global_step
self.store_flos()
self.log(logs)
metrics = None
if self.control.should_evaluate:
metrics = self.evaluate()
self._report_to_hp_search(trial, epoch, metrics)
if self.control.should_save:
self._save_checkpoint(model, trial, metrics=metrics)
self.control = self.callback_handler.on_save(self.args, self.state, self.control)
def _load_rng_state(self, checkpoint):
# Load RNG states from `checkpoint`
if checkpoint is None:
return
local_rank = xm.get_local_ordinal() if is_torch_tpu_available() else self.args.local_rank
if local_rank != -1:
rng_file = os.path.join(checkpoint, f"rng_state_{local_rank}.pth")
if not os.path.isfile(rng_file):
logger.info(
f"Didn't find an RNG file for process {local_rank}, if you are resuming a training that "
"wasn't launched in a distributed fashion, reproducibility is not guaranteed."
)
return
else:
rng_file = os.path.join(checkpoint, "rng_state.pth")
if not os.path.isfile(rng_file):
logger.info(
"Didn't find an RNG file, if you are resuming a training that was launched in a distributed "
"fashion, reproducibility is not guaranteed."
)
return
checkpoint_rng_state = torch.load(rng_file)
random.setstate(checkpoint_rng_state["python"])
np.random.set_state(checkpoint_rng_state["numpy"])
torch.random.set_rng_state(checkpoint_rng_state["cpu"])
if torch.cuda.is_available():
if self.args.local_rank != -1:
torch.cuda.random.set_rng_state(checkpoint_rng_state["cuda"])
else:
torch.cuda.random.set_rng_state_all(checkpoint_rng_state["cuda"])
if is_torch_tpu_available():
xm.set_rng_state(checkpoint_rng_state["xla"])
def _save_checkpoint(self, model, trial, metrics=None):
# In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we
# want to save except FullyShardedDDP.
# assert unwrap_model(model) is self.model, "internal model should be a reference to self.model"
# Save model checkpoint
checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
if self.hp_search_backend is not None and trial is not None:
if self.hp_search_backend == HPSearchBackend.OPTUNA:
run_id = trial.number
else:
from ray import tune
run_id = tune.get_trial_id()
run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}"
run_dir = os.path.join(self.args.output_dir, run_name)
else:
run_dir = self.args.output_dir
self.store_flos()
output_dir = os.path.join(run_dir, checkpoint_folder)
self.save_model(output_dir)
if self.deepspeed:
# under ZeRO-3, the model file itself doesn't get saved since it would be bogus, unless the deepspeed
# config `stage3_gather_fp16_weights_on_model_save` is True
self.deepspeed.save_checkpoint(output_dir)
# Save optimizer and scheduler
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
self.optimizer.consolidate_state_dict()
if is_torch_tpu_available():
xm.rendezvous("saving_optimizer_states")
xm.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
with warnings.catch_warnings(record=True) as caught_warnings:
xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
reissue_pt_warnings(caught_warnings)
elif is_sagemaker_mp_enabled():
if smp.dp_rank() == 0:
# Consolidate the state dict on all processes of dp_rank 0
opt_state_dict = self.optimizer.state_dict()
# Save it and the scheduler on the main process
if self.is_world_process_zero():
torch.save(opt_state_dict, os.path.join(output_dir, "optimizer.pt"))
with warnings.catch_warnings(record=True) as caught_warnings:
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
reissue_pt_warnings(caught_warnings)
if self.use_amp:
torch.save(self.scaler.state_dict(), os.path.join(output_dir, "scaler.pt"))
elif self.is_world_process_zero() and not self.deepspeed:
# deepspeed.save_checkpoint above saves model/optim/sched
torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
with warnings.catch_warnings(record=True) as caught_warnings:
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
reissue_pt_warnings(caught_warnings)
if self.use_amp:
torch.save(self.scaler.state_dict(), os.path.join(output_dir, "scaler.pt"))
# Determine the new best metric / best model checkpoint
if metrics is not None and self.args.metric_for_best_model is not None:
metric_to_check = self.args.metric_for_best_model
if not metric_to_check.startswith("eval_"):
metric_to_check = f"eval_{metric_to_check}"
metric_value = metrics[metric_to_check]
operator = np.greater if self.args.greater_is_better else np.less
if (
self.state.best_metric is None
or self.state.best_model_checkpoint is None
or operator(metric_value, self.state.best_metric)
):
self.state.best_metric = metric_value
self.state.best_model_checkpoint = output_dir
# Save the Trainer state
if self.is_world_process_zero():
self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
# Save RNG state in non-distributed training
rng_states = {
"python": random.getstate(),
"numpy": np.random.get_state(),
"cpu": torch.random.get_rng_state(),
}
if torch.cuda.is_available():
if self.args.local_rank == -1:
# In non distributed, we save the global CUDA RNG state (will take care of DataParallel)
rng_states["cuda"] = torch.cuda.random.get_rng_state_all()
else:
rng_states["cuda"] = torch.cuda.random.get_rng_state()
if is_torch_tpu_available():
rng_states["xla"] = xm.get_rng_state()
# A process can arrive here before the process 0 has a chance to save the model, in which case output_dir may
# not yet exist.
os.makedirs(output_dir, exist_ok=True)
local_rank = xm.get_local_ordinal() if is_torch_tpu_available() else self.args.local_rank
if local_rank == -1:
torch.save(rng_states, os.path.join(output_dir, "rng_state.pth"))
else:
torch.save(rng_states, os.path.join(output_dir, f"rng_state_{local_rank}.pth"))
# Maybe delete some older checkpoints.
if self.is_world_process_zero():
self._rotate_checkpoints(use_mtime=True, output_dir=run_dir)
def _load_optimizer_and_scheduler(self, checkpoint):
"""If optimizer and scheduler states exist, load them."""
if checkpoint is None:
return
if self.deepspeed:
# deepspeed loads optimizer/lr_scheduler together with the model in deepspeed_init
return
if os.path.isfile(os.path.join(checkpoint, "optimizer.pt")) and os.path.isfile(
os.path.join(checkpoint, "scheduler.pt")
):
# Load in optimizer and scheduler states
if is_torch_tpu_available():
# On TPU we have to take some extra precautions to properly load the states on the right device.
optimizer_state = torch.load(os.path.join(checkpoint, "optimizer.pt"), map_location="cpu")
with warnings.catch_warnings(record=True) as caught_warnings:
lr_scheduler_state = torch.load(os.path.join(checkpoint, "scheduler.pt"), map_location="cpu")
reissue_pt_warnings(caught_warnings)
xm.send_cpu_data_to_device(optimizer_state, self.args.device)
xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device)
self.optimizer.load_state_dict(optimizer_state)
self.lr_scheduler.load_state_dict(lr_scheduler_state)
else:
map_location = "cpu" if is_sagemaker_mp_enabled() else self.args.device
self.optimizer.load_state_dict(
torch.load(os.path.join(checkpoint, "optimizer.pt"), map_location=map_location)
)
with warnings.catch_warnings(record=True) as caught_warnings:
self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, "scheduler.pt")))
reissue_pt_warnings(caught_warnings)
if self.use_amp and os.path.isfile(os.path.join(checkpoint, "scaler.pt")):
self.scaler.load_state_dict(torch.load(os.path.join(checkpoint, "scaler.pt")))
def hyperparameter_search(
self,
hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None,
compute_objective: Optional[Callable[[Dict[str, float]], float]] = None,
n_trials: int = 20,
direction: str = "minimize",
backend: Optional[Union["str", HPSearchBackend]] = None,
hp_name: Optional[Callable[["optuna.Trial"], str]] = None,
**kwargs,
) -> BestRun:
"""
        Launch a hyperparameter search using ``optuna`` or ``Ray Tune``. The optimized quantity is determined by
:obj:`compute_objective`, which defaults to a function returning the evaluation loss when no metric is
provided, the sum of all metrics otherwise.
.. warning::
To use this method, you need to have provided a ``model_init`` when initializing your
:class:`~transformers.Trainer`: we need to reinitialize the model at each new run. This is incompatible
with the ``optimizers`` argument, so you need to subclass :class:`~transformers.Trainer` and override the
method :meth:`~transformers.Trainer.create_optimizer_and_scheduler` for custom optimizer/scheduler.
Args:
hp_space (:obj:`Callable[["optuna.Trial"], Dict[str, float]]`, `optional`):
A function that defines the hyperparameter search space. Will default to
:func:`~transformers.trainer_utils.default_hp_space_optuna` or
:func:`~transformers.trainer_utils.default_hp_space_ray` depending on your backend.
compute_objective (:obj:`Callable[[Dict[str, float]], float]`, `optional`):
A function computing the objective to minimize or maximize from the metrics returned by the
:obj:`evaluate` method. Will default to :func:`~transformers.trainer_utils.default_compute_objective`.
            n_trials (:obj:`int`, `optional`, defaults to 20):
The number of trial runs to test.
direction(:obj:`str`, `optional`, defaults to :obj:`"minimize"`):
                Whether to optimize for greater or lower values of the objective. Can be :obj:`"minimize"` or :obj:`"maximize"`, you should
pick :obj:`"minimize"` when optimizing the validation loss, :obj:`"maximize"` when optimizing one or
several metrics.
backend(:obj:`str` or :class:`~transformers.training_utils.HPSearchBackend`, `optional`):
The backend to use for hyperparameter search. Will default to optuna or Ray Tune, depending on which
one is installed. If both are installed, will default to optuna.
kwargs:
Additional keyword arguments passed along to :obj:`optuna.create_study` or :obj:`ray.tune.run`. For
more information see:
- the documentation of `optuna.create_study
<https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html>`__
- the documentation of `tune.run
<https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run>`__
Returns:
:class:`transformers.trainer_utils.BestRun`: All the information about the best run.
"""
if backend is None:
backend = default_hp_search_backend()
if backend is None:
raise RuntimeError(
"At least one of optuna or ray should be installed. "
"To install optuna run `pip install optuna`."
"To install ray run `pip install ray[tune]`."
)
backend = HPSearchBackend(backend)
if backend == HPSearchBackend.OPTUNA and not is_optuna_available():
raise RuntimeError("You picked the optuna backend, but it is not installed. Use `pip install optuna`.")
if backend == HPSearchBackend.RAY and not is_ray_tune_available():
raise RuntimeError(
"You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`."
)
self.hp_search_backend = backend
if self.model_init is None:
raise RuntimeError(
"To use hyperparameter search, you need to pass your model through a model_init function."
)
self.hp_space = default_hp_space[backend] if hp_space is None else hp_space
self.hp_name = hp_name
self.compute_objective = default_compute_objective if compute_objective is None else compute_objective
run_hp_search = run_hp_search_optuna if backend == HPSearchBackend.OPTUNA else run_hp_search_ray
best_run = run_hp_search(self, n_trials, direction, **kwargs)
self.hp_search_backend = None
return best_run
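    # Illustrative usage sketch (assumes a Trainer constructed with `model_init=...` and optuna
    # installed; the hyperparameter names below are examples only, not a prescribed search space):
    #
    #     def my_hp_space(trial):
    #         return {"learning_rate": trial.suggest_float("learning_rate", 1e-5, 1e-3, log=True)}
    #
    #     best_run = trainer.hyperparameter_search(hp_space=my_hp_space, n_trials=20, direction="minimize")
    #     print(best_run.hyperparameters)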
def log(self, logs: Dict[str, float]) -> None:
"""
Log :obj:`logs` on the various objects watching training.
Subclass and override this method to inject custom behavior.
Args:
logs (:obj:`Dict[str, float]`):
The values to log.
"""
if self.state.epoch is not None:
logs["epoch"] = round(self.state.epoch, 2)
output = {**logs, **{"step": self.state.global_step}}
self.state.log_history.append(output)
self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs)
def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:
"""
Prepare :obj:`inputs` before feeding them to the model, converting them to tensors if they are not already and
handling potential state.
"""
for k, v in inputs.items():
if isinstance(v, torch.Tensor):
inputs[k] = v.to(self.args.device)
if self.args.past_index >= 0 and self._past is not None:
inputs["mems"] = self._past
return inputs
def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
"""
Perform a training step on a batch of inputs.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to train.
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
Return:
:obj:`torch.Tensor`: The tensor with training loss on this batch.
"""
model.train()
inputs = self._prepare_inputs(inputs)
if is_sagemaker_mp_enabled():
scaler = self.scaler if self.use_amp else None
loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps, scaler=scaler)
return loss_mb.reduce_mean().detach().to(self.args.device)
if self.use_amp:
with autocast():
loss = self.compute_loss(model, inputs)
else:
loss = self.compute_loss(model, inputs)
if self.args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if self.args.gradient_accumulation_steps > 1 and not self.deepspeed:
# deepspeed handles loss scaling by gradient_accumulation_steps in its `backward`
loss = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(loss).backward()
elif self.use_apex:
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
# loss gets scaled under gradient_accumulation_steps in deepspeed
loss = self.deepspeed.backward(loss)
else:
loss.backward()
return loss.detach()
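    # Worked example of the loss scaling above (illustrative numbers): with gradient_accumulation_steps=4
    # and a raw batch loss of 2.0, the value backpropagated here is 2.0 / 4 = 0.5, so gradients summed over
    # 4 micro-batches approximate those of one batch 4x as large; the detached value returned is 0.5.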
def compute_loss(self, model, inputs, return_outputs=False):
"""
How the loss is computed by Trainer. By default, all models return the loss in the first element.
Subclass and override for custom behavior.
"""
if self.label_smoother is not None and "labels" in inputs:
labels = inputs.pop("labels")
else:
labels = None
outputs = model(**inputs)
# Save past state if it exists
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
if labels is not None:
loss = self.label_smoother(outputs, labels)
else:
# We don't use .loss here since the model may return tuples instead of ModelOutput.
try:
loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
except IndexError:
loss = outputs["loss"] if isinstance(outputs, dict) else outputs.item()
return (loss, outputs) if return_outputs else loss
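    # A hedged sketch of overriding `compute_loss` for a custom objective, as the docstring invites
    # (the subclass name and the plain cross-entropy are assumptions, not the library's own recipe):
    #
    #     class MyTrainer(Trainer):
    #         def compute_loss(self, model, inputs, return_outputs=False):
    #             labels = inputs.pop("labels")
    #             outputs = model(**inputs)
    #             logits = outputs.logits
    #             loss = torch.nn.functional.cross_entropy(logits.view(-1, logits.size(-1)), labels.view(-1))
    #             return (loss, outputs) if return_outputs else loss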
def is_local_process_zero(self) -> bool:
"""
Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several
machines) main process.
"""
return self.args.local_process_index == 0
def is_world_process_zero(self) -> bool:
"""
Whether or not this process is the global main process (when training in a distributed fashion on several
machines, this is only going to be :obj:`True` for one process).
"""
# Special case for SageMaker ModelParallel since there process_index is dp_process_index, not the global
# process index.
if is_sagemaker_mp_enabled():
return smp.rank() == 0
else:
return self.args.process_index == 0
def save_model(self, output_dir: Optional[str] = None):
"""
Will save the model, so you can reload it using :obj:`from_pretrained()`.
Will only save from the main process.
"""
if output_dir is None:
output_dir = self.args.output_dir
if is_torch_tpu_available():
self._save_tpu(output_dir)
elif is_sagemaker_mp_enabled():
# Calling the state_dict needs to be done on the wrapped model and on all processes.
state_dict = self.model_wrapped.state_dict()
if self.is_world_process_zero():
self._save(output_dir, state_dict=state_dict)
elif (
ShardedDDPOption.ZERO_DP_2 in self.args.sharded_ddp or ShardedDDPOption.ZERO_DP_3 in self.args.sharded_ddp
):
state_dict = self.model.state_dict()
if self.is_world_process_zero():
self._save(output_dir, state_dict=state_dict)
elif self.deepspeed:
# this takes care of everything as long as we aren't under zero3
if self.is_world_process_zero():
self._save(output_dir)
if is_deepspeed_zero3_enabled():
# It's too complicated to try to override different places where the weights dump gets
# saved, so since under zero3 the file is bogus, simply delete it. The user should
                # either use the deepspeed checkpoint to resume, or recover full weights with
# zero_to_fp32.py stored in the checkpoint.
if self.is_world_process_zero():
file = os.path.join(output_dir, WEIGHTS_NAME)
if os.path.isfile(file):
# logger.info(f"deepspeed zero3: removing {file}, see zero_to_fp32.py to recover weights")
os.remove(file)
# now save the real model if stage3_gather_fp16_weights_on_model_save=True
# if false it will not be saved.
# This must be called on all ranks
self.deepspeed.save_fp16_model(output_dir, WEIGHTS_NAME)
elif self.is_world_process_zero():
self._save(output_dir)
def _save_tpu(self, output_dir: Optional[str] = None):
output_dir = output_dir if output_dir is not None else self.args.output_dir
logger.info(f"Saving model checkpoint to {output_dir}")
if xm.is_master_ordinal():
os.makedirs(output_dir, exist_ok=True)
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
xm.rendezvous("saving_checkpoint")
if not isinstance(self.model, PreTrainedModel):
if isinstance(unwrap_model(self.model), PreTrainedModel):
unwrap_model(self.model).save_pretrained(
output_dir,
save_config=self.is_world_process_zero(),
state_dict=self.model.state_dict(),
save_function=xm.save,
)
else:
logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
state_dict = self.model.state_dict()
xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
else:
self.model.save_pretrained(output_dir, save_config=self.is_world_process_zero(), save_function=xm.save)
if self.tokenizer is not None and self.is_world_process_zero():
self.tokenizer.save_pretrained(output_dir)
def _save(self, output_dir: Optional[str] = None, state_dict=None):
# If we are executing this function, we are the process zero, so we don't check for that.
output_dir = output_dir if output_dir is not None else self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
logger.info(f"Saving model checkpoint to {output_dir}")
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
if not isinstance(self.model, PreTrainedModel):
if isinstance(unwrap_model(self.model), PreTrainedModel):
if state_dict is None:
state_dict = self.model.state_dict()
unwrap_model(self.model).save_pretrained(output_dir, state_dict=state_dict)
else:
logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
if state_dict is None:
state_dict = self.model.state_dict()
torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
else:
self.model.save_pretrained(output_dir, state_dict=state_dict)
if self.tokenizer is not None:
self.tokenizer.save_pretrained(output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
def store_flos(self):
# Storing the number of floating-point operations that went into the model
if self.args.local_rank != -1:
self.state.total_flos += distributed_broadcast_scalars([self.current_flos]).sum().item()
self.current_flos = 0
else:
self.state.total_flos += self.current_flos
self.current_flos = 0
def _sorted_checkpoints(
self, output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False
) -> List[str]:
ordering_and_checkpoint_path = []
glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{checkpoint_prefix}-*")]
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
if regex_match is not None and regex_match.groups() is not None:
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
# Make sure we don't delete the best model.
if self.state.best_model_checkpoint is not None:
best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint)))
for i in range(best_model_index, len(checkpoints_sorted) - 2):
checkpoints_sorted[i], checkpoints_sorted[i + 1] = checkpoints_sorted[i + 1], checkpoints_sorted[i]
return checkpoints_sorted
def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None:
if self.args.save_total_limit is None or self.args.save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir)
if len(checkpoints_sorted) <= self.args.save_total_limit:
return
        # If save_total_limit=1 with load_best_model_at_end=True, we could end up deleting the last checkpoint, which
# we don't do to allow resuming.
save_total_limit = self.args.save_total_limit
if (
self.state.best_model_checkpoint is not None
and self.args.save_total_limit == 1
and checkpoints_sorted[-1] != self.state.best_model_checkpoint
):
save_total_limit = 2
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit")
shutil.rmtree(checkpoint)
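    # Worked example (illustrative): with save_total_limit=2 and checkpoint-500/1000/1500 already on
    # disk, saving checkpoint-2000 triggers this rotation and leaves only checkpoint-1500 and
    # checkpoint-2000, except that _sorted_checkpoints keeps state.best_model_checkpoint out of the
    # deletion window.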
def evaluate(
self,
eval_dataset: Optional[Dataset] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> Dict[str, float]:
"""
        Run evaluation and return the metrics.
The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
(pass it to the init :obj:`compute_metrics` argument).
You can also subclass and override this method to inject custom behavior.
Args:
eval_dataset (:obj:`Dataset`, `optional`):
                Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is a :obj:`datasets.Dataset`,
columns not accepted by the ``model.forward()`` method are automatically removed. It must implement the
:obj:`__len__` method.
            ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
"eval_bleu" if the prefix is "eval" (default)
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
dictionary also contains the epoch number which comes from the training state.
"""
# memory metrics - must set up as early as possible
self._memory_tracker.start()
eval_dataloader = self.get_eval_dataloader(eval_dataset)
start_time = time.time()
eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
output = eval_loop(
eval_dataloader,
description="Evaluation",
# No point gathering the predictions if there are no metrics, otherwise we defer to
# self.args.prediction_loss_only
prediction_loss_only=True if self.compute_metrics is None else None,
ignore_keys=ignore_keys,
metric_key_prefix=metric_key_prefix,
)
total_batch_size = self.args.eval_batch_size * self.args.world_size
output.metrics.update(
speed_metrics(
metric_key_prefix,
start_time,
num_samples=output.num_samples,
num_steps=math.ceil(output.num_samples / total_batch_size),
)
)
self.log(output.metrics)
if DebugOption.TPU_METRICS_DEBUG in self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics)
self._memory_tracker.stop_and_update_metrics(output.metrics)
return output.metrics
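    # Typical call pattern (a sketch; the extra metric keys depend on the user's compute_metrics function):
    #
    #     metrics = trainer.evaluate()
    #     print(metrics["eval_loss"], metrics.get("eval_accuracy"))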
def predict(
self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "test"
) -> PredictionOutput:
"""
        Run prediction and return the predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
will also return metrics, like in :obj:`evaluate()`.
Args:
test_dataset (:obj:`Dataset`):
Dataset to run the predictions on. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed. Has to implement the method :obj:`__len__`
            ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"test"`):
An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
"test_bleu" if the prefix is "test" (default)
.. note::
            If your predictions or labels have different sequence lengths (for instance because you're doing dynamic
padding in a token classification task) the predictions will be padded (on the right) to allow for
concatenation into one array. The padding index is -100.
Returns: `NamedTuple` A namedtuple with the following keys:
- predictions (:obj:`np.ndarray`): The predictions on :obj:`test_dataset`.
- label_ids (:obj:`np.ndarray`, `optional`): The labels (if the dataset contained some).
- metrics (:obj:`Dict[str, float]`, `optional`): The potential dictionary of metrics (if the dataset
contained labels).
"""
# memory metrics - must set up as early as possible
self._memory_tracker.start()
test_dataloader = self.get_test_dataloader(test_dataset)
start_time = time.time()
eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
output = eval_loop(
test_dataloader, description="Prediction", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix
)
total_batch_size = self.args.eval_batch_size * self.args.world_size
output.metrics.update(
speed_metrics(
metric_key_prefix,
start_time,
num_samples=output.num_samples,
num_steps=math.ceil(output.num_samples / total_batch_size),
)
)
self._memory_tracker.stop_and_update_metrics(output.metrics)
return PredictionOutput(predictions=output.predictions, label_ids=output.label_ids, metrics=output.metrics)
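    # A hedged usage sketch (the dataset variable and the classification head are assumptions):
    #
    #     output = trainer.predict(test_dataset)
    #     preds = output.predictions.argmax(-1)
    #     print(output.metrics)  # e.g. test_loss plus the speed metrics added above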
def evaluation_loop(
self,
dataloader: DataLoader,
description: str,
prediction_loss_only: Optional[bool] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> EvalLoopOutput:
"""
Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.
        Works with or without labels.
"""
prediction_loss_only = (
prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
)
# if eval is called w/o train init deepspeed here
if self.args.deepspeed and not self.deepspeed:
# XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval
# from the checkpoint eventually
deepspeed_engine, _, _ = deepspeed_init(self, num_training_steps=0, resume_from_checkpoint=None)
self.model = deepspeed_engine.module
self.model_wrapped = deepspeed_engine
self.deepspeed = deepspeed_engine
# XXX: we don't need optim/sched for inference, but this needs to be sorted out, since
# for example the Z3-optimizer is a must for zero3 to work even for inference - what we
# don't need is the deepspeed basic optimizer which is self.optimizer.optimizer
deepspeed_engine.optimizer.optimizer = None
deepspeed_engine.lr_scheduler = None
model = self._wrap_model(self.model, training=False)
# if full fp16 is wanted on eval and this ``evaluation`` or ``predict`` isn't called while
        # ``train`` is running, cast the model to half precision first and then put it on the device
if not self.is_in_train and self.args.fp16_full_eval:
model = model.half().to(self.args.device)
batch_size = dataloader.batch_size
logger.info(f"***** Running {description} *****")
if isinstance(dataloader.dataset, collections.abc.Sized):
logger.info(f" Num examples = {self.num_examples(dataloader)}")
else:
logger.info(" Num examples: Unknown")
logger.info(f" Batch size = {batch_size}")
model.eval()
self.callback_handler.eval_dataloader = dataloader
# Do this before wrapping.
eval_dataset = dataloader.dataset
if is_torch_tpu_available():
dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)
if self.args.past_index >= 0:
self._past = None
# Initialize containers
# losses/preds/labels on GPU/TPU (accumulated for eval_accumulation_steps)
losses_host = None
preds_host = None
labels_host = None
# losses/preds/labels on CPU (final containers)
all_losses = None
all_preds = None
all_labels = None
        # Will be useful when we have an iterable dataset, in which case we don't know its length.
observed_num_examples = 0
# Main evaluation loop
for step, inputs in enumerate(dataloader):
# Update the observed num examples
observed_batch_size = find_batch_size(inputs)
if observed_batch_size is not None:
observed_num_examples += observed_batch_size
# Prediction step
loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
# Update containers on host
if loss is not None:
losses = self._nested_gather(loss.repeat(batch_size))
losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
if logits is not None:
logits = self._pad_across_processes(logits)
logits = self._nested_gather(logits)
preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
if labels is not None:
labels = self._pad_across_processes(labels)
labels = self._nested_gather(labels)
labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)
# Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0:
if losses_host is not None:
losses = nested_numpify(losses_host)
all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0)
if preds_host is not None:
logits = nested_numpify(preds_host)
all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if labels_host is not None:
labels = nested_numpify(labels_host)
all_labels = (
labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100)
)
# Set back to None to begin a new accumulation
losses_host, preds_host, labels_host = None, None, None
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of the evaluation loop
delattr(self, "_past")
# Gather all remaining tensors and put them back on the CPU
if losses_host is not None:
losses = nested_numpify(losses_host)
all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0)
if preds_host is not None:
logits = nested_numpify(preds_host)
all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if labels_host is not None:
labels = nested_numpify(labels_host)
all_labels = labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100)
# Number of samples
if not isinstance(eval_dataset, IterableDataset):
num_samples = len(eval_dataset)
elif isinstance(eval_dataset, IterableDatasetShard):
num_samples = eval_dataset.num_examples
else:
num_samples = observed_num_examples
# Number of losses has been rounded to a multiple of batch_size and in a distributed training, the number of
        # samples has been rounded to a multiple of batch_size, so we truncate.
if all_losses is not None:
all_losses = all_losses[:num_samples]
if all_preds is not None:
all_preds = nested_truncate(all_preds, num_samples)
if all_labels is not None:
all_labels = nested_truncate(all_labels, num_samples)
# Metrics!
if self.compute_metrics is not None and all_preds is not None and all_labels is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=all_preds, label_ids=all_labels))
else:
metrics = {}
# To be JSON-serializable, we need to remove numpy types or zero-d tensors
metrics = denumpify_detensorize(metrics)
if all_losses is not None:
metrics[f"{metric_key_prefix}_loss"] = all_losses.mean().item()
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
return EvalLoopOutput(predictions=all_preds, label_ids=all_labels, metrics=metrics, num_samples=num_samples)
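    # Illustrative behaviour of eval_accumulation_steps (not a prescribed setting): with a value of 10,
    # the device-side containers (losses_host/preds_host/labels_host) are numpified and appended to the
    # CPU containers every 10 prediction steps, bounding accelerator memory during long evaluations.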
def _nested_gather(self, tensors, name=None):
"""
        Gather the value of `tensors` (a tensor or list/tuple of nested tensors) across all processes so they can be
        safely concatenated.
"""
if tensors is None:
return
if is_torch_tpu_available():
if name is None:
name = "nested_gather"
tensors = nested_xla_mesh_reduce(tensors, name)
elif is_sagemaker_mp_enabled():
tensors = smp_gather(tensors)
elif self.args.local_rank != -1:
tensors = distributed_concat(tensors)
return tensors
# Copied from Accelerate.
def _pad_across_processes(self, tensor, pad_index=-100):
"""
Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so
they can safely be gathered.
"""
if isinstance(tensor, (list, tuple)):
return type(tensor)(self._pad_across_processes(t, pad_index=pad_index) for t in tensor)
elif isinstance(tensor, dict):
return type(tensor)({k: self._pad_across_processes(v, pad_index=pad_index) for k, v in tensor.items()})
elif not isinstance(tensor, torch.Tensor):
raise TypeError(
f"Can't pad the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors."
)
if len(tensor.shape) < 2:
return tensor
# Gather all sizes
size = torch.tensor(tensor.shape, device=tensor.device)[None]
sizes = self._nested_gather(size).cpu()
max_size = max(s[1] for s in sizes)
if tensor.shape[1] == max_size:
return tensor
# Then pad to the maximum size
old_size = tensor.shape
new_size = list(old_size)
new_size[1] = max_size
new_tensor = tensor.new_zeros(tuple(new_size)) + pad_index
new_tensor[:, : old_size[1]] = tensor
return new_tensor
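    # Worked example (illustrative shapes): if one process holds logits of shape (8, 12) and another
    # holds (8, 15), both are padded along dim 1 to (8, 15) with pad_index=-100 before gathering,
    # so the concatenated result is a well-formed (16, 15) tensor.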
def prediction_step(
self,
model: nn.Module,
inputs: Dict[str, Union[torch.Tensor, Any]],
prediction_loss_only: bool,
ignore_keys: Optional[List[str]] = None,
) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]:
"""
Perform an evaluation step on :obj:`model` using obj:`inputs`.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to evaluate.
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
prediction_loss_only (:obj:`bool`):
Whether or not to return the loss only.
            ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
Return:
Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss,
logits and labels (each being optional).
"""
has_labels = all(inputs.get(k) is not None for k in self.label_names)
inputs = self._prepare_inputs(inputs)
if ignore_keys is None:
if hasattr(self.model, "config"):
ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", [])
else:
ignore_keys = []
# labels may be popped when computing the loss (label smoothing for instance) so we grab them first.
if has_labels:
labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
if len(labels) == 1:
labels = labels[0]
else:
labels = None
with torch.no_grad():
if is_sagemaker_mp_enabled():
raw_outputs = smp_forward_only(model, inputs)
if has_labels:
if isinstance(raw_outputs, dict):
loss_mb = raw_outputs["loss"]
logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys + ["loss"])
else:
loss_mb = raw_outputs[0]
logits_mb = raw_outputs[1:]
loss = loss_mb.reduce_mean().detach().cpu()
logits = smp_nested_concat(logits_mb)
else:
loss = None
if isinstance(raw_outputs, dict):
logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys)
else:
logits_mb = raw_outputs
logits = smp_nested_concat(logits_mb)
else:
if has_labels:
loss, outputs = self.compute_loss(model, inputs, return_outputs=True)
loss = loss.mean().detach()
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"])
else:
logits = outputs[1:]
else:
loss = None
if self.use_amp:
with autocast():
outputs = model(**inputs)
else:
outputs = model(**inputs)
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)
else:
logits = outputs
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index - 1]
if prediction_loss_only:
return (loss, None, None)
logits = nested_detach(logits)
if len(logits) == 1:
logits = logits[0]
return (loss, logits, labels)
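    # Illustrative use of ignore_keys (the key names are assumptions): passing
    # ignore_keys=["hidden_states", "attentions"] keeps only the logits among the gathered
    # predictions instead of every tensor the model returns.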
def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]):
"""
For models that inherit from :class:`~transformers.PreTrainedModel`, uses that method to compute the number of
floating point operations for every backward + forward pass. If using another model, either implement such a
method in the model or subclass and override this method.
Args:
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
Returns:
:obj:`int`: The number of floating-point operations.
"""
if hasattr(self.model, "floating_point_ops"):
return self.model.floating_point_ops(inputs)
else:
return 0
def create_model_card(
self,
language: Optional[str] = None,
license: Optional[str] = None,
tags: Optional[str] = None,
model_name: Optional[str] = None,
finetuned_from: Optional[str] = None,
dataset_tags: Optional[Union[str, List[str]]] = None,
dataset: Optional[Union[str, List[str]]] = None,
dataset_args: Optional[Union[str, List[str]]] = None,
):
training_summary = TrainingSummary.from_trainer(
self,
language=language,
license=license,
tags=tags,
model_name=model_name,
finetuned_from=finetuned_from,
dataset_tags=dataset_tags,
dataset=dataset,
dataset_args=dataset_args,
)
model_card = training_summary.to_model_card()
with open(os.path.join(self.args.output_dir, "README.md"), "w") as f:
f.write(model_card)
def push_to_hub(
self,
repo_name: Optional[str] = None,
repo_url: Optional[str] = None,
commit_message: Optional[str] = "add model",
organization: Optional[str] = None,
        private: Optional[bool] = None,
use_auth_token: Optional[Union[bool, str]] = None,
**kwargs,
):
"""
Upload `self.model` to the 🤗 model hub.
Parameters:
repo_name (:obj:`str`, `optional`):
Repository name for your model or tokenizer in the hub. If not specified and :obj:`repo_url` is not
specified either, will default to the stem of :obj:`self.args.output_dir`.
repo_url (:obj:`str`, `optional`):
Specify this in case you want to push to an existing repository in the hub. If unspecified, a new
repository will be created in your namespace (unless you specify an :obj:`organization`) with
:obj:`repo_name`.
commit_message (:obj:`str`, `optional`, defaults to :obj:`"add model"`):
Message to commit while pushing.
organization (:obj:`str`, `optional`):
Organization in which you want to push your model or tokenizer (you must be a member of this
organization).
private (:obj:`bool`, `optional`):
Whether or not the repository created should be private (requires a paying subscription).
use_auth_token (:obj:`bool` or :obj:`str`, `optional`):
The token to use as HTTP bearer authorization for remote files. If :obj:`True`, will use the token
generated when running :obj:`transformers-cli login` (stored in :obj:`~/.huggingface`). Will default to
:obj:`True` if :obj:`repo_url` is not specified.
kwargs:
Additional keyword arguments passed along to :meth:`~transformers.Trainer.create_model_card`.
Returns:
The url of the commit of your model in the given repository.
"""
if not self.is_world_process_zero():
return
if not isinstance(unwrap_model(self.model), PushToHubMixin):
raise ValueError(
"The `upload_model_to_hub` method only works for models that inherit from `PushToHubMixin` models."
)
if repo_url is None and repo_name is None:
repo_name = Path(self.args.output_dir).name
if repo_name is not None:
model_name = repo_name
elif repo_url is not None:
model_name = repo_url.split("/")[-1]
else:
model_name = None
self.create_model_card(model_name=model_name, **kwargs)
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy(os.path.join(self.args.output_dir, "README.md"), os.path.join(tmp_dir, "README.md"))
unwrap_model(self.model).save_pretrained(tmp_dir)
if self.tokenizer is not None:
self.tokenizer.save_pretrained(tmp_dir)
return unwrap_model(self.model)._push_to_hub(
save_directory=tmp_dir,
repo_name=repo_name,
repo_url=repo_url,
commit_message=commit_message,
organization=organization,
private=private,
use_auth_token=use_auth_token,
)
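    # A hedged usage sketch (the repository name is an assumption):
    #
    #     url = trainer.push_to_hub(repo_name="my-finetuned-model", commit_message="add model")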
#
# Deprecated code
#
def prediction_loop(
self,
dataloader: DataLoader,
description: str,
prediction_loss_only: Optional[bool] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> PredictionOutput:
"""
Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.
        Works with or without labels.
"""
if not isinstance(dataloader.dataset, collections.abc.Sized):
raise ValueError("dataset must implement __len__")
prediction_loss_only = (
prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
)
# if eval is called w/o train init deepspeed here
if self.args.deepspeed and not self.deepspeed:
# XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval
# from the checkpoint eventually
deepspeed_engine, _, _ = deepspeed_init(self, num_training_steps=0, resume_from_checkpoint=None)
self.model = deepspeed_engine.module
self.model_wrapped = deepspeed_engine
self.deepspeed = deepspeed_engine
# XXX: we don't need optim/sched for inference, but this needs to be sorted out, since
# for example the Z3-optimizer is a must for zero3 to work even for inference - what we
# don't need is the deepspeed basic optimizer which is self.optimizer.optimizer
deepspeed_engine.optimizer.optimizer = None
deepspeed_engine.lr_scheduler = None
model = self._wrap_model(self.model, training=False)
# if full fp16 is wanted on eval and this ``evaluation`` or ``predict`` isn't called while
        # ``train`` is running, cast the model to half precision first and then put it on the device
if not self.is_in_train and self.args.fp16_full_eval:
model = model.half().to(self.args.device)
batch_size = dataloader.batch_size
num_examples = self.num_examples(dataloader)
logger.info(f"***** Running {description} *****")
logger.info(f" Num examples = {num_examples}")
logger.info(f" Batch size = {batch_size}")
losses_host: torch.Tensor = None
preds_host: Union[torch.Tensor, List[torch.Tensor]] = None
labels_host: Union[torch.Tensor, List[torch.Tensor]] = None
world_size = max(1, self.args.world_size)
eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size)
if not prediction_loss_only:
            # The actual number of eval samples can be greater than num_examples in distributed settings (when we pass
# a batch size to the sampler)
make_multiple_of = None
if hasattr(dataloader, "sampler") and isinstance(dataloader.sampler, SequentialDistributedSampler):
make_multiple_of = dataloader.sampler.batch_size
preds_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
labels_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
model.eval()
if is_torch_tpu_available():
dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)
if self.args.past_index >= 0:
self._past = None
self.callback_handler.eval_dataloader = dataloader
for step, inputs in enumerate(dataloader):
loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
if loss is not None:
losses = loss.repeat(batch_size)
losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
if logits is not None:
preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
if labels is not None:
labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)
# Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0:
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
if not prediction_loss_only:
preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
# Set back to None to begin a new accumulation
losses_host, preds_host, labels_host = None, None, None
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of the evaluation loop
delattr(self, "_past")
# Gather all remaining tensors and put them back on the CPU
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
if not prediction_loss_only:
preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
eval_loss = eval_losses_gatherer.finalize()
preds = preds_gatherer.finalize() if not prediction_loss_only else None
label_ids = labels_gatherer.finalize() if not prediction_loss_only else None
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
# To be JSON-serializable, we need to remove numpy types or zero-d tensors
metrics = denumpify_detensorize(metrics)
if eval_loss is not None:
metrics[f"{metric_key_prefix}_loss"] = eval_loss.mean().item()
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def _gather_and_numpify(self, tensors, name):
"""
Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before
concatenating them to `gathered`
"""
if tensors is None:
return
if is_torch_tpu_available():
tensors = nested_xla_mesh_reduce(tensors, name)
elif is_sagemaker_mp_enabled():
tensors = smp_gather(tensors)
elif self.args.local_rank != -1:
tensors = distributed_concat(tensors)
return nested_numpify(tensors)
| [
"torch.cat",
"torch.cuda.amp.autocast",
"torch.Generator",
"torch.cuda.random.set_rng_state",
"torch.random.get_rng_state",
"torch.cuda.random.set_rng_state_all",
"torch.cuda.is_available",
"torch.load",
"torch.cuda.random.get_rng_state_all",
"torch.nn.DataParallel",
"torch.utils.data.sampler.RandomSampler",
"torch.tensor",
"torch.empty",
"torch.random.set_rng_state",
"torch.nn.parallel.DistributedDataParallel",
"torch.utils.data.sampler.SequentialSampler",
"torch.cuda.amp.GradScaler",
"torch.distributed.barrier",
"torch.cuda.random.get_rng_state",
"torch.distributed.get_local_rank",
"torch.utils.data.dataloader.DataLoader",
"torch.no_grad",
"torch.utils.data.distributed.DistributedSampler"
] | 1.0 | boubakerwa/transformers | 8ed998f6456a60999d6f9a8a6b0094ab2da7473d |
0.4 | import torch.nn as nn
import torch.nn.functional as F
import torch
import argparse
class UPDeT(nn.Module):
def __init__(self, input_shape, args):
super(UPDeT, self).__init__()
self.args = args
self.transformer = Transformer(args.token_dim, args.emb, args.heads, args.depth, args.emb)
self.q_basic = nn.Linear(args.emb, 6)
def init_hidden(self):
# make hidden states on same device as model
return torch.zeros(1, self.args.emb).cuda()
def forward(self, inputs, hidden_state, task_enemy_num, task_ally_num):
outputs, _ = self.transformer.forward(inputs, hidden_state, None)
        # first output is for the 6 basic actions (no_op, stop, up, down, left, right)
q_basic_actions = self.q_basic(outputs[:, 0, :])
        # the last output token is kept as the hidden state
h = outputs[:, -1:, :]
q_enemies_list = []
# each enemy has an output Q
for i in range(task_enemy_num):
q_enemy = self.q_basic(outputs[:, 1 + i, :])
q_enemy_mean = torch.mean(q_enemy, 1, True)
q_enemies_list.append(q_enemy_mean)
# concat enemy Q over all enemies
q_enemies = torch.stack(q_enemies_list, dim=1).squeeze()
# concat basic action Q with enemy attack Q
q = torch.cat((q_basic_actions, q_enemies), 1)
return q, h
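    # Illustrative shapes (assuming 5 allies, 5 enemies, emb=32): `outputs` is (5, 11, 32); token 0
    # yields the 6 basic-action Qs, tokens 1..5 yield one attack Q per enemy (reduced by the mean),
    # and the last token is carried forward as the hidden state, so q is (5, 11) and h is (5, 1, 32).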
class SelfAttention(nn.Module):
def __init__(self, emb, heads=8, mask=False):
super().__init__()
self.emb = emb
self.heads = heads
self.mask = mask
self.tokeys = nn.Linear(emb, emb * heads, bias=False)
self.toqueries = nn.Linear(emb, emb * heads, bias=False)
self.tovalues = nn.Linear(emb, emb * heads, bias=False)
self.unifyheads = nn.Linear(heads * emb, emb)
def forward(self, x, mask):
b, t, e = x.size()
h = self.heads
keys = self.tokeys(x).view(b, t, h, e)
queries = self.toqueries(x).view(b, t, h, e)
values = self.tovalues(x).view(b, t, h, e)
# compute scaled dot-product self-attention
# - fold heads into the batch dimension
keys = keys.transpose(1, 2).contiguous().view(b * h, t, e)
queries = queries.transpose(1, 2).contiguous().view(b * h, t, e)
values = values.transpose(1, 2).contiguous().view(b * h, t, e)
queries = queries / (e ** (1 / 4))
keys = keys / (e ** (1 / 4))
        # - Instead of dividing the dot products by sqrt(e), we scale the queries and keys.
# This should be more memory efficient
# - get dot product of queries and keys, and scale
dot = torch.bmm(queries, keys.transpose(1, 2))
assert dot.size() == (b * h, t, t)
if self.mask: # mask out the upper half of the dot matrix, excluding the diagonal
mask_(dot, maskval=float('-inf'), mask_diagonal=False)
if mask is not None:
dot = dot.masked_fill(mask == 0, -1e9)
dot = F.softmax(dot, dim=2)
# - dot now has row-wise self-attention probabilities
# apply the self attention to the values
out = torch.bmm(dot, values).view(b, h, t, e)
# swap h, t back, unify heads
out = out.transpose(1, 2).contiguous().view(b, t, h * e)
return self.unifyheads(out)
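    # Shape walk-through (illustrative, with b=2, t=11, e=32, heads h=3): keys/queries/values are
    # viewed as (2, 11, 3, 32) and folded to (6, 11, 32); `dot` is the (6, 11, 11) row-wise softmaxed
    # attention matrix, and the output is unfolded and unified back to (2, 11, 32).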
class TransformerBlock(nn.Module):
def __init__(self, emb, heads, mask, ff_hidden_mult=4, dropout=0.0):
super().__init__()
self.attention = SelfAttention(emb, heads=heads, mask=mask)
self.mask = mask
self.norm1 = nn.LayerNorm(emb)
self.norm2 = nn.LayerNorm(emb)
self.ff = nn.Sequential(
nn.Linear(emb, ff_hidden_mult * emb),
nn.ReLU(),
nn.Linear(ff_hidden_mult * emb, emb)
)
self.do = nn.Dropout(dropout)
def forward(self, x_mask):
x, mask = x_mask
attended = self.attention(x, mask)
x = self.norm1(attended + x)
x = self.do(x)
fedforward = self.ff(x)
x = self.norm2(fedforward + x)
x = self.do(x)
return x, mask
class Transformer(nn.Module):
def __init__(self, input_dim, emb, heads, depth, output_dim):
super().__init__()
self.num_tokens = output_dim
self.token_embedding = nn.Linear(input_dim, emb)
tblocks = []
for i in range(depth):
tblocks.append(
TransformerBlock(emb=emb, heads=heads, mask=False))
self.tblocks = nn.Sequential(*tblocks)
self.toprobs = nn.Linear(emb, output_dim)
def forward(self, x, h, mask):
tokens = self.token_embedding(x)
tokens = torch.cat((tokens, h), 1)
b, t, e = tokens.size()
x, mask = self.tblocks((tokens, mask))
x = self.toprobs(x.view(b * t, e)).view(b, t, self.num_tokens)
return x, tokens
def mask_(matrices, maskval=0.0, mask_diagonal=True):
b, h, w = matrices.size()
indices = torch.triu_indices(h, w, offset=0 if mask_diagonal else 1)
matrices[:, indices[0], indices[1]] = maskval
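# Worked example (illustrative): for 3x3 matrices and mask_diagonal=False, torch.triu_indices(3, 3, offset=1)
# selects positions (0, 1), (0, 2) and (1, 2), so only the strict upper triangle of every matrix in the
# batch is overwritten with maskval (float('-inf') when used for attention masking).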
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Unit Testing')
parser.add_argument('--token_dim', default='5', type=int)
parser.add_argument('--emb', default='32', type=int)
parser.add_argument('--heads', default='3', type=int)
parser.add_argument('--depth', default='2', type=int)
parser.add_argument('--ally_num', default='5', type=int)
parser.add_argument('--enemy_num', default='5', type=int)
parser.add_argument('--episode', default='20', type=int)
args = parser.parse_args()
# testing the agent
agent = UPDeT(None, args).cuda()
hidden_state = agent.init_hidden().cuda().expand(args.ally_num, 1, -1)
tensor = torch.rand(args.ally_num, args.ally_num+args.enemy_num, args.token_dim).cuda()
q_list = []
for _ in range(args.episode):
q, hidden_state = agent.forward(tensor, hidden_state, args.ally_num, args.enemy_num)
q_list.append(q)
| [
"torch.nn.Linear",
"torch.rand",
"torch.cat",
"torch.nn.LayerNorm",
"torch.nn.Dropout",
"torch.zeros",
"torch.triu_indices",
"torch.stack",
"torch.nn.Sequential",
"torch.bmm",
"torch.nn.ReLU",
"torch.nn.functional.softmax",
"torch.mean"
] | 0.4.1 | DH-O/UPDeT | 5c73feef31bebed59b0a35873a7133f319ec868b |
1.2 | import os
import hydra
import torch
import logging
import torch.nn as nn
from torch import optim
from hydra import utils
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
# self
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../")))
import DeepKE.models as models
from DeepKE.tools import preprocess, CustomDataset, collate_fn, train, validate
from DeepKE.utils import manual_seed, load_pkl
logger = logging.getLogger(__name__)
@hydra.main(config_path='../conf/config.yaml')
def main(cfg):
cwd = utils.get_original_cwd()
cwd = cwd[0:-5]
cfg.cwd = cwd
cfg.pos_size = 2 * cfg.pos_limit + 2
logger.info(f'\n{cfg.pretty()}')
__Model__ = {
'cnn': models.PCNN,
'rnn': models.BiLSTM,
'transformer': models.Transformer,
'gcn': models.GCN,
'capsule': models.Capsule,
'lm': models.LM,
}
# device
if cfg.use_gpu and torch.cuda.is_available():
device = torch.device('cuda', cfg.gpu_id)
else:
device = torch.device('cpu')
logger.info(f'device: {device}')
    # If the preprocessing procedure is unchanged, it is best to comment out this step so the data is not re-preprocessed on every run
if cfg.preprocess:
preprocess(cfg)
train_data_path = os.path.join(cfg.cwd, cfg.out_path, 'train.pkl')
valid_data_path = os.path.join(cfg.cwd, cfg.out_path, 'valid.pkl')
test_data_path = os.path.join(cfg.cwd, cfg.out_path, 'test.pkl')
vocab_path = os.path.join(cfg.cwd, cfg.out_path, 'vocab.pkl')
if cfg.model_name == 'lm':
vocab_size = None
else:
vocab = load_pkl(vocab_path)
vocab_size = vocab.count
cfg.vocab_size = vocab_size
train_dataset = CustomDataset(train_data_path)
valid_dataset = CustomDataset(valid_data_path)
test_dataset = CustomDataset(test_data_path)
train_dataloader = DataLoader(train_dataset, batch_size=cfg.batch_size,
shuffle=True, collate_fn=collate_fn(cfg))
valid_dataloader = DataLoader(valid_dataset, batch_size=cfg.batch_size,
shuffle=True, collate_fn=collate_fn(cfg))
test_dataloader = DataLoader(test_dataset, batch_size=cfg.batch_size,
shuffle=True, collate_fn=collate_fn(cfg))
model = __Model__[cfg.model_name](cfg)
model.to(device)
logger.info(f'\n {model}')
optimizer = optim.Adam(model.parameters(), lr=cfg.learning_rate,
weight_decay=cfg.weight_decay)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
factor=cfg.lr_factor,
patience=cfg.lr_patience)
criterion = nn.CrossEntropyLoss()
best_f1, best_epoch = -1, 0
es_loss, es_f1, es_epoch, es_patience, best_es_epoch, best_es_f1, \
es_path, best_es_path = 1e8, -1, 0, 0, 0, -1, '', ''
train_losses, valid_losses = [], []
if cfg.show_plot and cfg.plot_utils == 'tensorboard':
writer = SummaryWriter('tensorboard')
else:
writer = None
logger.info('=' * 10 + ' Start training ' + '=' * 10)
for epoch in range(1, cfg.epoch + 1):
manual_seed(cfg.seed + epoch)
train_loss = train(epoch, model, train_dataloader, optimizer, criterion,
device, writer, cfg)
valid_f1, valid_loss = validate(epoch, model, valid_dataloader,
criterion, device, cfg)
scheduler.step(valid_loss)
model_path = model.save(epoch, cfg)
# logger.info(model_path)
train_losses.append(train_loss)
valid_losses.append(valid_loss)
if best_f1 < valid_f1:
best_f1 = valid_f1
best_epoch = epoch
        # Use the validation loss as the early-stopping criterion
if es_loss > valid_loss:
es_loss = valid_loss
es_f1 = valid_f1
es_epoch = epoch
es_patience = 0
es_path = model_path
else:
es_patience += 1
if es_patience >= cfg.early_stopping_patience:
best_es_epoch = es_epoch
best_es_f1 = es_f1
best_es_path = es_path
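    # Illustrative behaviour of the early-stopping bookkeeping above: with early_stopping_patience=5,
    # the best-by-validation-loss epoch is remembered, and once the validation loss has failed to
    # improve for 5 consecutive epochs the current best (epoch, f1, checkpoint path) is recorded as
    # the early-stopping result; training itself still runs for the full cfg.epoch epochs.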
if cfg.show_plot:
if cfg.plot_utils == 'matplot':
plt.plot(train_losses, 'x-')
plt.plot(valid_losses, '+-')
plt.legend(['train', 'valid'])
plt.title('train/valid comparison loss')
plt.show()
if cfg.plot_utils == 'tensorboard':
for i in range(len(train_losses)):
writer.add_scalars('train/valid_comparison_loss', {
'train': train_losses[i],
'valid': valid_losses[i]
}, i)
writer.close()
logger.info(
f'best(valid loss quota) early stopping epoch: {best_es_epoch}, '
f'this epoch macro f1: {best_es_f1:0.4f}')
logger.info(f'this model save path: {best_es_path}')
logger.info(
f'total {cfg.epoch} epochs, best(valid macro f1) epoch: {best_epoch}, '
f'this epoch macro f1: {best_f1:.4f}')
logger.info('=====end of training====')
logger.info('')
logger.info('=====start test performance====')
validate(-1, model, test_dataloader, criterion, device, cfg)
logger.info('=====ending====')
if __name__ == '__main__':
main()
| [
"torch.device",
"torch.utils.tensorboard.SummaryWriter",
"torch.cuda.is_available",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.nn.CrossEntropyLoss"
] | 1.2 | dongyingname/DeepKE | e5511bdad739a36cabd8439faebfceee22aebccb |
1.4 | # Adapted from code from the book "Deep Reinforcement Learning" by Maxim Lapan
import numpy as np
import torch
from lib import environ
def validation_run(env, net, episodes=100, device="cpu", epsilon=0.02, commission=0.1):
stats = {
"episode_reward": [],
"episode_steps": [],
"order_profits": [],
"order_steps": [],
}
for episode in range(episodes):
obs = env.reset()
total_reward = 0.0
position = None
position_steps = None
episode_steps = 0
while True:
obs_v = torch.tensor([obs]).to(device)
out_v = net(obs_v)
action_idx = out_v.max(dim=1)[1].item()
if np.random.random() < epsilon:
action_idx = env.action_space.sample()
action = environ.Actions(action_idx)
close_price = env._state._cur_close()
if action == environ.Actions.Buy and position is None:
position = close_price
position_steps = 0
elif action == environ.Actions.Close and position is not None:
profit = (
                    close_price - position - (close_price + position) * commission / 100
)
profit = 100.0 * profit / position
stats["order_profits"].append(profit)
stats["order_steps"].append(position_steps)
position = None
position_steps = None
obs, reward, done, _ = env.step(action_idx)
total_reward += reward
episode_steps += 1
if position_steps is not None:
position_steps += 1
if done:
if position is not None:
profit = (
close_price
- position
                        - (close_price + position) * commission / 100
)
profit = 100.0 * profit / position
stats["order_profits"].append(profit)
stats["order_steps"].append(position_steps)
break
stats["episode_reward"].append(total_reward)
stats["episode_steps"].append(episode_steps)
return {key: np.mean(vals) for key, vals in stats.items()}
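# Worked example of the profit formula above (illustrative numbers): buying at position=100.0 and
# closing at close_price=110.0 with commission=0.1 (percent) gives
# 110 - 100 - (110 + 100) * 0.1 / 100 = 9.79, i.e. a relative profit of 100 * 9.79 / 100 = 9.79%.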
| [
"torch.tensor"
] | 1.4.0 | ankile/reinforcement-trading | 849ba30d8be05abf1e9eae919463c4eebe812ce8 |
1.9 | import heapq
import pathlib
import pickle
import string
from collections import defaultdict
from typing import List, Tuple
import click
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import torch
from pydantic import BaseModel
from spacy.lang.en.stop_words import STOP_WORDS
from tqdm.auto import tqdm
from src import preprocess
from src.helpers import constants
from src.models import TrainAll, ours
BAD_WORDS = (
STOP_WORDS
| set(string.punctuation)
| {"‘", '"', "’", "“", "”", "–", "...", "nt", "10", "20"}
)
class ADGConfig(BaseModel):
num_top_words: int = 20
num_connections: int = 3
node_size_range: Tuple[int, int] = (300, 3000)
edge_size_range: Tuple[float, float] = (0.0, 0.8) # between zero and one
DEFAULTS = {
"soccer": ADGConfig(
num_top_words=30,
),
}
def build_config(subreddit: str) -> ADGConfig:
assert subreddit in DEFAULTS, "No config found. Please add it to DEFAULTS!"
return DEFAULTS[subreddit]
def generate_attention_weights(name: str) -> List[Tuple[str, torch.Tensor]]:
device = next(iter(constants.DEVICES)) # Pick any GPU.
with (constants.MODELS_DIR / f"{name}.model").open("rb") as f:
wrapper = pickle.load(f)
model = wrapper.model.to(device)
config = wrapper.config
assert isinstance(
config.training_scheme, TrainAll
), "Top k models should be TrainAll!"
rows, _ = preprocess.load(config.subreddit)
row_tokens, title_tokens, _ = ours._tokenize(rows, config.subreddit)
result = []
model.eval() # Disable dropout
with torch.no_grad():
for title, vec in zip(
title_tokens, tqdm(row_tokens, desc="Collecting weights")
):
vec = vec.to(device).float()
vec = model.attn(vec, vec, vec)[1].cpu().numpy()
result.append((title, vec[0])) # vec is 1 x n x n
return result
def _compute_graph(weights, config: ADGConfig):
word_to_words_weight = defaultdict(lambda: defaultdict(int))
word_to_word_freq = defaultdict(int)
for title, title_weights in tqdm(weights, desc="Calculating word weights"):
for i, out_word in enumerate(title):
out_word = out_word.lower()
if out_word not in BAD_WORDS:
for j, in_word in enumerate(title):
in_word = in_word.lower()
if in_word not in BAD_WORDS:
word_to_words_weight[out_word][in_word] += title_weights[i][j]
word_to_word_freq[out_word] += 1
word_to_words_avg_weight = defaultdict(dict)
for out_word in tqdm(word_to_words_weight, desc="Normalizing"):
for in_word in word_to_words_weight[out_word]:
word_to_words_avg_weight[out_word][in_word] = (
word_to_words_weight[out_word][in_word] / word_to_word_freq[out_word]
)
# This will be populated soon!
nodes = [] # (node, frequency)
edges = [] # (x, y, weight)
    # Generate the graph, solely of words that "make the cut".
_node_names = set()
_top_words = heapq.nlargest(
config.num_top_words, word_to_word_freq.keys(), key=word_to_word_freq.get
)
for out_word in _top_words:
_connections = heapq.nlargest(
config.num_connections,
set(word_to_words_avg_weight[out_word].keys()) - {out_word},
key=word_to_words_avg_weight[out_word].get,
)
for in_word in _connections:
weight = word_to_words_avg_weight[out_word][in_word]
edges.append((out_word, in_word, weight))
_node_names |= {out_word, in_word}
# Compute all mentioned nodes
nodes = [(n, word_to_word_freq[n]) for n in _node_names]
# Normalize nodes:
_node_weights = np.array([w for n, w in nodes], dtype=np.float32)
_node_weights = np.interp(
_node_weights,
(_node_weights.min(), _node_weights.max()),
config.node_size_range,
)
nodes = [(n, w) for (n, _), w in zip(nodes, _node_weights)]
# Normalize edges:
_edge_weights = np.array([w for _, _, w in edges], dtype=np.float32)
_edge_weights = np.interp(
_edge_weights,
(_edge_weights.min(), _edge_weights.max()),
config.edge_size_range,
)
edges = [(a, b, w) for (a, b, _), w in zip(edges, _edge_weights)]
return nodes, edges
def _visualize(nodes, edges, output_path: str):
k = 5
nodes = nodes[k:] + nodes[:k]
G = nx.DiGraph()
G.add_nodes_from([n for n, _ in nodes])
G.add_weighted_edges_from(edges)
plt.figure(1, figsize=(8, 8), dpi=200)
nx.draw(
G,
nx.circular_layout(G),
node_size=[w for _, w in nodes],
edge_color=[(w, w, w) for _, _, w in edges],
with_labels=True,
font_size=15,
alpha=0.9,
node_color="#B4CDED",
arrowstyle="->",
)
plt.savefig(output_path)
@click.command()
@click.option(
"--subreddit",
type=str,
required=True,
help="Subreddit we are running on (for config)",
)
@click.option(
"--model_name",
type=str,
required=True,
help="Model to load for processing title.",
)
@click.option(
"--output_name",
type=str,
required=True,
help="Output file name for visualization.",
)
def generate(subreddit: str, model_name: str, output_name: str):
output_path = constants.FIGURES_DIR / output_name
config = build_config(subreddit)
weights = generate_attention_weights(model_name)
nodes, edges = _compute_graph(weights, config)
_visualize(nodes, edges, output_path)
if __name__ == "__main__":
generate()
| [
"torch.no_grad"
] | 1.9.1 | evanweissburg/judging-a-book | b273988485047e1496387e91529ed25b6688b88f |
1.0 | # Copyright (c) 2020 mingruimingrui
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the MIT-style license found in the
# LICENSE file in the root directory of this source tree.
"""LASER LSTM sequence encoder"""
import torch
from torch import nn
from typing import NamedTuple, Optional, Tuple
class EncoderOuts(NamedTuple):
sentemb: torch.Tensor
encoder_out: Optional[Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]
encoder_padding_mask: Optional[torch.Tensor]
class Encoder(nn.Module):
def __init__(
self,
num_embeddings: int,
padding_idx: int,
embed_dim: int = 320,
hidden_size: int = 512,
num_layers: int = 1,
bidirectional: bool = False,
padding_value: float = 0.0
):
"""LSTM based sequence encoder
Arguments:
num_embeddings {int} -- Number of unique token embeddings
padding_idx {int} -- Padding token id
Keyword Arguments:
embed_dim {int} -- Embedding dimension size (default: {320})
hidden_size {int} -- Hidden layer dimension size (default: {512})
num_layers {int} -- Number of LSTM layers (default: {1})
bidirectional {bool} -- Use Bidirectional LSTM (default: {False})
padding_value {float} --
Value to pad hidden layers (default: {0.0})
"""
super().__init__()
self.num_embeddings = int(num_embeddings)
self.padding_idx = int(padding_idx)
self.embed_dim = int(embed_dim)
self.hidden_size = int(hidden_size)
self.num_layers = int(num_layers)
self.bidirectional = bool(bidirectional)
self.padding_value = float(padding_value)
self.embed_tokens = nn.Embedding(
num_embeddings=self.num_embeddings,
embedding_dim=self.embed_dim,
padding_idx=self.padding_idx
)
self.lstm = nn.LSTM(
input_size=self.embed_dim,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
bidirectional=self.bidirectional
)
self.num_directions = 2 if self.bidirectional else 1
self.output_units = self.num_directions * self.hidden_size
def forward(
self,
src_tokens: torch.LongTensor,
src_lengths: torch.LongTensor,
return_encoder_out: bool = False,
return_encoder_padding_mask: bool = False,
) -> EncoderOuts:
"""Encode a batch of sequences
Arguments:
src_tokens {torch.LongTensor} -- [batch_size, seq_len]
src_lengths {torch.LongTensor} -- [batch_size]
Keyword Arguments:
return_encoder_out {bool} --
Return output tensors? (default: {False})
return_encoder_padding_mask {bool} --
Return encoder padding mask? (default: {False})
        Returns:
            EncoderOuts -- named tuple of sentemb plus optional encoder_out and encoder_padding_mask
"""
bsz, seqlen = src_tokens.size()
x = self.embed_tokens(src_tokens)
x = x.transpose(0, 1) # BTC -> TBC
# Pack then apply LSTM
packed_x = nn.utils.rnn.pack_padded_sequence(
x, src_lengths, batch_first=False, enforce_sorted=True)
packed_outs, (final_hiddens, final_cells) = \
self.lstm.forward(packed_x)
x, _ = nn.utils.rnn.pad_packed_sequence(
packed_outs, padding_value=self.padding_value)
assert list(x.size()) == [seqlen, bsz, self.output_units]
# Set padded outputs to -inf so they are not selected by max-pooling
padding_mask = src_tokens.eq(self.padding_idx).t()
if padding_mask.any():
x = x.float().masked_fill_(
mask=padding_mask.unsqueeze(-1),
value=float('-inf'),
).type_as(x)
# Build the sentence embedding by max-pooling over the encoder outputs
sentemb = x.max(dim=0)[0]
encoder_out = None
if return_encoder_out:
final_hiddens = self._combine_outs(final_hiddens)
final_cells = self._combine_outs(final_cells)
encoder_out = (x, final_hiddens, final_cells)
encoder_padding_mask = None
if return_encoder_padding_mask:
encoder_padding_mask = src_tokens.eq(self.padding_idx).t()
return EncoderOuts(
sentemb=sentemb,
encoder_out=encoder_out,
encoder_padding_mask=encoder_padding_mask
)
def _combine_outs(self, x: torch.Tensor) -> torch.Tensor:
"""Combines outputs for same layer same entry in batch.
Only used for BiLSTM.
Arguments:
outs {torch.Tensor} -- [num_layers * num_dir, bsz, hidden_size]
Returns:
torch.Tensor -- [num_layers, bsz, num_dir * hidden_size]
"""
# [num_layers * num_dir, bsz, hidden_size]
# -> [num_layers, num_dir, bsz, hidden_size]
# -> [num_layers, bsz, num_dir, hidden_size]
# -> [num_layers, bsz, num_dir * hidden_size]
if self.num_directions == 1:
return x
        x = x.reshape(
            self.num_layers,
            self.num_directions,
            -1,
            self.hidden_size
        ).transpose(1, 2)  # [num_layers, bsz, num_dir, hidden_size]
        return x.reshape(self.num_layers, -1, self.output_units)
def load_encoder_from_file(model_path: str) -> Tuple[Encoder, dict]:
# Load state_dict
state_dict = torch.load(model_path, map_location='cpu')
if 'left_pad' in state_dict['params']:
del state_dict['params']['left_pad']
# Create encoder
encoder = Encoder(**state_dict['params'])
encoder.load_state_dict(state_dict['model'])
encoder.eval()
return encoder, state_dict['dictionary']
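# Minimal usage sketch (token ids and shapes are illustrative, not from the original repo):
# encoder = Encoder(num_embeddings=1000, padding_idx=0, bidirectional=True)
# tokens = torch.randint(1, 1000, (4, 12))           # [batch_size, seq_len], avoids the padding id
# lengths = torch.full((4,), 12, dtype=torch.long)   # must be sorted descending (enforce_sorted=True)
# outs = encoder(tokens, lengths)
# outs.sentemb                                       # [4, encoder.output_units]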
| [
"torch.nn.LSTM",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.load",
"torch.nn.Embedding"
] | 1.0 | mingruimingrui/laser-keep-alive | 9f06ec825d7a8aadf46f1f1c96dae2537b101b17 |
1.10 | import torch
from torch import nn
import torch.nn.functional as F
import math
from backbone.repvgg import get_RepVGG_func_by_name
import utils
class SixDRepNet(nn.Module):
def __init__(self,
backbone_name, backbone_file, deploy,
bins=(1, 2, 3, 6),
droBatchNorm=nn.BatchNorm2d,
pretrained=True):
super(SixDRepNet, self).__init__()
repvgg_fn = get_RepVGG_func_by_name(backbone_name)
backbone = repvgg_fn(deploy)
if pretrained:
checkpoint = torch.load(backbone_file)
if 'state_dict' in checkpoint:
checkpoint = checkpoint['state_dict']
ckpt = {k.replace('module.', ''): v for k,
v in checkpoint.items()} # strip the names
backbone.load_state_dict(ckpt)
self.layer0, self.layer1, self.layer2, self.layer3, self.layer4 = backbone.stage0, backbone.stage1, backbone.stage2, backbone.stage3, backbone.stage4
self.gap = nn.AdaptiveAvgPool2d(output_size=1)
last_channel = 0
for n, m in self.layer4.named_modules():
if ('rbr_dense' in n or 'rbr_reparam' in n) and isinstance(m, nn.Conv2d):
last_channel = m.out_channels
fea_dim = last_channel
self.linear_reg = nn.Linear(fea_dim, 6)
def forward(self, x):
x = self.layer0(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x= self.gap(x)
x = torch.flatten(x, 1)
x = self.linear_reg(x)
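        # map the 6D output to a 3x3 rotation matrix (continuous 6D rotation representation, cf. Zhou et al. 2019)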
return utils.compute_rotation_matrix_from_ortho6d(x)
class SixDRepNet2(nn.Module):
def __init__(self, block, layers, fc_layers=1):
self.inplanes = 64
super(SixDRepNet2, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7)
self.linear_reg = nn.Linear(512*block.expansion,6)
# Vestigial layer from previous experiments
self.fc_finetune = nn.Linear(512 * block.expansion + 3, 3)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.linear_reg(x)
out = utils.compute_rotation_matrix_from_ortho6d(x)
return out | [
"torch.nn.Linear",
"torch.flatten",
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.AvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.load",
"torch.nn.AdaptiveAvgPool2d"
] | 1.10.1 | arsalan0004/6DRepNet | aa28f95516cacb1cac31313b981b9fd3f51f5c28 |
1.7 | import os
import argparse
import gc
from PIL import Image
import numpy as np
import torch
from torchvision.transforms.functional import to_tensor, to_pil_image
from model import Generator
gc.collect()
torch.backends.cudnn.enabled = False
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
def load_image(image_path, x32=False):
img = Image.open(image_path).convert("RGB")
if x32:
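        # round width/height down to a multiple of 32 (minimum 256) so the network's downsampling divides the input evenly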
def to_32s(x):
return 256 if x < 256 else x - x % 32
w, h = img.size
img = img.resize((to_32s(w), to_32s(h)))
return img
def test(args):
device = args.device
net = Generator()
net.load_state_dict(torch.load(args.checkpoint, map_location="cpu"))
net.to(device).eval()
print(f"model loaded: {args.checkpoint}")
os.makedirs(args.output_dir, exist_ok=True)
for image_name in sorted(os.listdir(args.input_dir)):
if os.path.splitext(image_name)[-1].lower() not in [".jpg", ".png", ".bmp", ".tiff"]:
continue
image = load_image(os.path.join(args.input_dir, image_name), args.x32)
with torch.no_grad():
image = to_tensor(image).unsqueeze(0) * 2 - 1
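            # to_tensor yields values in [0, 1]; rescale to [-1, 1] to match the generator's expected input range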
out = net(image.to(device), args.upsample_align).cpu()
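            # clamp to [-1, 1] and map back to [0, 1] before converting to a PIL image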
out = out.squeeze(0).clip(-1, 1) * 0.5 + 0.5
out = to_pil_image(out)
out.save(os.path.join(args.output_dir, image_name))
print(f"image saved: {image_name}")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint',
type=str,
default='./weights/paprika.pt',
)
parser.add_argument(
'--input_dir',
type=str,
default='./samples/inputs',
)
parser.add_argument(
'--output_dir',
type=str,
default='./samples/results',
)
parser.add_argument(
'--device',
type=str,
default='cuda:0',
)
parser.add_argument(
'--upsample_align',
type=bool,
default=False,
help="Align corners in decoder upsampling layers"
)
parser.add_argument(
'--x32',
action="store_true",
help="Resize images to multiple of 32"
)
args = parser.parse_args()
test(args)
| [
"torch.no_grad",
"torch.load"
] | 1.7.1 | codetyphon/AnimeGAN-web | 6a49296cc039bd86d133a8d023ad94199c19ed2b |
1.4 | #!/usr/bin/env python3
""" ImageNet Training Script
This is intended to be a lean and easily modifiable ImageNet training script that reproduces ImageNet
training results with some of the latest networks and training techniques. It favours canonical PyTorch
and standard Python style over trying to be able to 'do it all.' That said, it offers quite a few speed
and training result improvements over the usual PyTorch example scripts. Repurpose as you see fit.
This script was started from an early version of the PyTorch ImageNet example
(https://github.com/pytorch/examples/tree/master/imagenet)
NVIDIA CUDA specific speedups adopted from NVIDIA Apex examples
(https://github.com/NVIDIA/apex/tree/master/examples/imagenet)
Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
"""
import argparse
import time
import yaml
import os
import logging
from collections import OrderedDict
from contextlib import suppress
from datetime import datetime
import torch
import torch.nn as nn
import torch.distributed as dist
import torchvision.utils
from torch.nn.parallel import DistributedDataParallel as NativeDDP
from timm.data import create_dataset, create_loader, resolve_data_config, Mixup, FastCollateMixup, AugMixDataset
from timm.models import create_model, safe_model_name, resume_checkpoint, load_checkpoint,\
convert_splitbn_model, model_parameters
from timm.utils import *
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy, JsdCrossEntropy
from timm.optim import create_optimizer_v2, optimizer_kwargs
from timm.scheduler import create_scheduler
from timm.utils import ApexScaler, NativeScaler
try:
from apex import amp
from apex.parallel import DistributedDataParallel as ApexDDP
from apex.parallel import convert_syncbn_model
has_apex = True
except ImportError:
has_apex = False
has_native_amp = False
try:
if getattr(torch.cuda.amp, 'autocast') is not None:
has_native_amp = True
except AttributeError:
pass
try:
import wandb
has_wandb = True
except ImportError:
has_wandb = False
torch.backends.cudnn.benchmark = True
_logger = logging.getLogger('train')
# The first arg parser parses out only the --config argument, this argument is used to
# load a yaml file containing key-values that override the defaults for the main parser below
config_parser = parser = argparse.ArgumentParser(description='Training Config', add_help=False)
parser.add_argument('-c', '--config', default='', type=str, metavar='FILE',
help='YAML config file specifying default arguments')
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
# Dataset / Model parameters
parser.add_argument('data_dir', metavar='DIR',
help='path to dataset')
parser.add_argument('--dataset', '-d', metavar='NAME', default='',
help='dataset type (default: ImageFolder/ImageTar if empty)')
parser.add_argument('--train-split', metavar='NAME', default='train',
help='dataset train split (default: train)')
parser.add_argument('--val-split', metavar='NAME', default='validation',
help='dataset validation split (default: validation)')
parser.add_argument('--model', default='resnet101', type=str, metavar='MODEL',
                    help='Name of model to train (default: "resnet101")')
parser.add_argument('--pretrained', action='store_true', default=False,
help='Start with pretrained version of specified network (if avail)')
parser.add_argument('--initial-checkpoint', default='', type=str, metavar='PATH',
help='Initialize model from this checkpoint (default: none)')
parser.add_argument('--pretrained-path', default='', type=str, metavar='PATH',
help='Load from original checkpoint and pretrain (default: none) (with --pretrained)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='Resume full model and optimizer state from checkpoint (default: none)')
parser.add_argument('--no-resume-opt', action='store_true', default=False,
help='prevent resume of optimizer state when resuming model')
parser.add_argument('--num-classes', type=int, default=None, metavar='N',
help='number of label classes (Model default if None)')
parser.add_argument('--gp', default=None, type=str, metavar='POOL',
help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.')
parser.add_argument('--img-size', type=int, default=None, metavar='N',
help='Image patch size (default: None => model default)')
parser.add_argument('--input-size', default=None, nargs=3, type=int,
metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')
parser.add_argument('--crop-pct', default=None, type=float,
metavar='N', help='Input image center crop percent (for validation only)')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
help='Override std deviation of of dataset')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('-b', '--batch-size', type=int, default=32, metavar='N',
help='input batch size for training (default: 32)')
parser.add_argument('-vb', '--validation-batch-size-multiplier', type=int, default=1, metavar='N',
help='ratio of validation batch size to training batch size (default: 1)')
# Optimizer parameters
parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER',
                    help='Optimizer (default: "sgd")')
parser.add_argument('--opt-eps', default=None, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: None, use opt default)')
parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='Optimizer momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=0.0001,
help='weight decay (default: 0.0001)')
parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--clip-mode', type=str, default='norm',
help='Gradient clipping mode. One of ("norm", "value", "agc")')
# Learning rate schedule parameters
parser.add_argument('--sched', default='step', type=str, metavar='SCHEDULER',
                    help='LR scheduler (default: "step")')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',
help='learning rate noise on/off epoch percentages')
parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',
help='learning rate noise limit percent (default: 0.67)')
parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',
help='learning rate noise std-dev (default: 1.0)')
parser.add_argument('--lr-cycle-mul', type=float, default=1.0, metavar='MULT',
help='learning rate cycle len multiplier (default: 1.0)')
parser.add_argument('--lr-cycle-limit', type=int, default=1, metavar='N',
help='learning rate cycle limit')
parser.add_argument('--warmup-lr', type=float, default=0.0001, metavar='LR',
help='warmup learning rate (default: 0.0001)')
parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
parser.add_argument('--epochs', type=int, default=200, metavar='N',
                    help='number of epochs to train (default: 200)')
parser.add_argument('--epoch-repeats', type=float, default=0., metavar='N',
help='epoch repeat multiplier (number of times to repeat dataset epoch per train epoch).')
parser.add_argument('--start-epoch', default=None, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--decay-epochs', type=float, default=30, metavar='N',
help='epoch interval to decay LR')
parser.add_argument('--warmup-epochs', type=int, default=3, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',
help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
parser.add_argument('--patience-epochs', type=int, default=10, metavar='N',
                    help='patience epochs for Plateau LR scheduler (default: 10)')
parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
help='LR decay rate (default: 0.1)')
# Augmentation & regularization parameters
parser.add_argument('--no-aug', action='store_true', default=False,
help='Disable all training augmentation, override other train aug args')
parser.add_argument('--scale', type=float, nargs='+', default=[0.08, 1.0], metavar='PCT',
help='Random resize scale (default: 0.08 1.0)')
parser.add_argument('--ratio', type=float, nargs='+', default=[3./4., 4./3.], metavar='RATIO',
help='Random resize aspect ratio (default: 0.75 1.33)')
parser.add_argument('--hflip', type=float, default=0.5,
help='Horizontal flip training aug probability')
parser.add_argument('--vflip', type=float, default=0.,
help='Vertical flip training aug probability')
parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
                    help='Use AutoAugment policy. "v0" or "original". (default: rand-m9-mstd0.5-inc1)')
parser.add_argument('--aug-splits', type=int, default=0,
help='Number of augmentation splits (default: 0, valid: 0 or >=2)')
parser.add_argument('--jsd', action='store_true', default=False,
help='Enable Jensen-Shannon Divergence + CE loss. Use with `--aug-splits`.')
parser.add_argument('--reprob', type=float, default=0., metavar='PCT',
help='Random erase prob (default: 0.)')
parser.add_argument('--remode', type=str, default='const',
help='Random erase mode (default: "const")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
parser.add_argument('--mixup', type=float, default=0.0,
help='mixup alpha, mixup enabled if > 0. (default: 0.)')
parser.add_argument('--cutmix', type=float, default=0.0,
help='cutmix alpha, cutmix enabled if > 0. (default: 0.)')
parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup-prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup-mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
parser.add_argument('--mixup-off-epoch', default=0, type=int, metavar='N',
help='Turn off mixup after this epoch, disabled if 0 (default: 0)')
parser.add_argument('--smoothing', type=float, default=0.1,
help='Label smoothing (default: 0.1)')
parser.add_argument('--train-interpolation', type=str, default='random',
help='Training interpolation (random, bilinear, bicubic default: "random")')
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
help='Dropout rate (default: 0.)')
parser.add_argument('--drop-connect', type=float, default=None, metavar='PCT',
help='Drop connect rate, DEPRECATED, use drop-path (default: None)')
parser.add_argument('--drop-path', type=float, default=None, metavar='PCT',
help='Drop path rate (default: None)')
parser.add_argument('--drop-block', type=float, default=None, metavar='PCT',
help='Drop block rate (default: None)')
parser.add_argument('--repeated-aug', action='store_true')
parser.set_defaults(repeated_aug=True)
# Batch norm parameters (only works with gen_efficientnet based models currently)
parser.add_argument('--bn-tf', action='store_true', default=False,
help='Use Tensorflow BatchNorm defaults for models that support it (default: False)')
parser.add_argument('--bn-momentum', type=float, default=None,
help='BatchNorm momentum override (if not None)')
parser.add_argument('--bn-eps', type=float, default=None,
help='BatchNorm epsilon override (if not None)')
parser.add_argument('--sync-bn', action='store_true',
help='Enable NVIDIA Apex or Torch synchronized BatchNorm.')
parser.add_argument('--dist-bn', type=str, default="reduce",
help='Distribute BatchNorm stats between nodes after each epoch ("broadcast", "reduce", or "")')
parser.add_argument('--split-bn', action='store_true',
help='Enable separate BN layers per augmentation split.')
# Model Exponential Moving Average
parser.add_argument('--model-ema', action='store_true', default=False,
help='Enable tracking moving average of model weights')
parser.add_argument('--model-ema-force-cpu', action='store_true', default=False,
help='Force ema to be tracked on CPU, rank=0 node only. Disables EMA validation.')
parser.add_argument('--model-ema-decay', type=float, default=0.9998,
help='decay factor for model weights moving average (default: 0.9998)')
# Misc
parser.add_argument('--seed', type=int, default=42, metavar='S',
help='random seed (default: 42)')
parser.add_argument('--log-interval', type=int, default=50, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--recovery-interval', type=int, default=0, metavar='N',
help='how many batches to wait before writing recovery checkpoint')
parser.add_argument('--checkpoint-hist', type=int, default=10, metavar='N',
help='number of checkpoints to keep (default: 10)')
parser.add_argument('-j', '--workers', type=int, default=4, metavar='N',
                    help='how many training processes to use (default: 4)')
parser.add_argument('--save-images', action='store_true', default=False,
                    help='save images of input batches every log interval for debugging')
parser.add_argument('--amp', action='store_true', default=False,
help='use NVIDIA Apex AMP or Native AMP for mixed precision training')
parser.add_argument('--apex-amp', action='store_true', default=False,
help='Use NVIDIA Apex AMP mixed precision')
parser.add_argument('--native-amp', action='store_true', default=False,
help='Use Native Torch AMP mixed precision')
parser.add_argument('--channels-last', action='store_true', default=False,
help='Use channels_last memory layout')
parser.add_argument('--pin-mem', action='store_true', default=False,
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no-prefetcher', action='store_true', default=False,
help='disable fast prefetcher')
parser.add_argument('--output', default='', type=str, metavar='PATH',
help='path to output folder (default: none, current dir)')
parser.add_argument('--experiment', default='', type=str, metavar='NAME',
help='name of train experiment, name of sub-folder for output')
parser.add_argument('--eval-metric', default='top1', type=str, metavar='EVAL_METRIC',
                    help='Best metric (default: "top1")')
parser.add_argument('--tta', type=int, default=0, metavar='N',
help='Test/inference time augmentation (oversampling) factor. 0=None (default: 0)')
parser.add_argument('--use-multi-epochs-loader', action='store_true', default=False,
help='use the multi-epochs-loader to save time at the beginning of every epoch')
parser.add_argument('--torchscript', dest='torchscript', action='store_true',
help='convert model torchscript for inference')
parser.add_argument('--log-wandb', action='store_true', default=False,
help='log training and validation metrics to wandb')
parser.add_argument('--fake-separated-loss-log', action='store_true', default=False,
                    help='log the loss separately for fake and original samples')
parser.add_argument('--pause', type=int, default=None,
help='pause training at the epoch')
# distributed training
parser.add_argument('--world-size', default=-1, type=int,
help='number of nodes for distributed training')
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument("--global_rank", default=0, type=int)
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--device', default=None, type=int,
help='GPU id to use.')
# original params
parser.add_argument('--separate-rate', type=float, default=1.0,
                    help='Relative weight of the fake-detection task against the classification task (1.0 = equal weight)')
def _parse_args():
# Do we have a config file to parse?
args_config, remaining = config_parser.parse_known_args()
if args_config.config:
with open(args_config.config, 'r') as f:
cfg = yaml.safe_load(f)
parser.set_defaults(**cfg)
# The main arg parser parses the rest of the args, the usual
# defaults will have been overridden if config file specified.
args = parser.parse_args(remaining)
# Cache the args as a text string to save them in the output dir later
args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
return args, args_text
def main():
setup_default_logging()
args, args_text = _parse_args()
args.prefetcher = not args.no_prefetcher
args.distributed = True
if args.device is not None:
print("Use GPU: {} for training".format(args.device))
if args.distributed:
# initialize torch.distributed using MPI
# from mpi4py import MPI
# comm = MPI.COMM_WORLD
# world_size = comm.Get_size()
# rank = comm.Get_rank()
# init_method = 'tcp://{}:23456'.format(args.dist_url)
master_addr = os.getenv("MASTER_ADDR", default="localhost")
master_port = os.getenv('MASTER_PORT', default='8888')
method = "tcp://{}:{}".format(master_addr, master_port)
rank = int(os.getenv('OMPI_COMM_WORLD_RANK', '0'))
world_size = int(os.getenv('OMPI_COMM_WORLD_SIZE', '1'))
ngpus_per_node = torch.cuda.device_count()
device = rank % ngpus_per_node
torch.cuda.set_device(device)
torch.distributed.init_process_group('nccl', init_method=method, world_size=world_size, rank=rank)
args.local_rank = device
args.global_rank = rank
args.device = device
args.world_size = world_size
_logger.info('Training in distributed mode with multiple processes, 1 GPU per process. Process %d, total %d.'
% (args.global_rank, args.world_size))
# resolve AMP arguments based on PyTorch / Apex availability
use_amp = None
if args.amp:
# `--amp` chooses native amp before apex (APEX ver not actively maintained)
if has_native_amp:
args.native_amp = True
elif has_apex:
args.apex_amp = True
if args.apex_amp and has_apex:
use_amp = 'apex'
elif args.native_amp and has_native_amp:
use_amp = 'native'
elif args.apex_amp or args.native_amp:
        _logger.warning("Neither APEX nor native Torch AMP is available, using float32. "
                        "Install NVIDIA apex or upgrade to PyTorch 1.6")
random_seed(args.seed, args.global_rank)
if args.log_wandb and args.global_rank == 0:
if has_wandb:
wandb.init(project="pytorch-image-models", name=args.experiment, config=args)
else:
_logger.warning("You've requested to log metrics to wandb but package not found. "
"Metrics not being logged to wandb, try `pip install wandb`")
model = create_model(
args.model,
pretrained=args.pretrained,
num_classes=args.num_classes,
drop_rate=args.drop,
drop_connect_rate=args.drop_connect, # DEPRECATED, use drop_path
drop_path_rate=args.drop_path,
drop_block_rate=args.drop_block,
global_pool=args.gp,
bn_tf=args.bn_tf,
bn_momentum=args.bn_momentum,
bn_eps=args.bn_eps,
scriptable=args.torchscript,
checkpoint_path=args.initial_checkpoint,
pretrained_path=args.pretrained_path,
separate_flg=True)
if args.num_classes is None:
assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.'
args.num_classes = model.num_classes # FIXME handle model default vs config num_classes more elegantly
if args.global_rank == 0:
_logger.info(
f'Model {safe_model_name(args.model)} created, param count:{sum([m.numel() for m in model.parameters()])}')
data_config = resolve_data_config(vars(args), model=model, verbose=args.global_rank == 0)
# setup augmentation batch splits for contrastive loss or split bn
num_aug_splits = 0
if args.aug_splits > 0:
assert args.aug_splits > 1, 'A split of 1 makes no sense'
num_aug_splits = args.aug_splits
# enable split bn (separate bn stats per batch-portion)
if args.split_bn:
assert num_aug_splits > 1 or args.resplit
model = convert_splitbn_model(model, max(num_aug_splits, 2))
# move model to GPU, enable channels last layout if set
model.cuda()
if args.channels_last:
model = model.to(memory_format=torch.channels_last)
# setup synchronized BatchNorm for distributed training
if args.distributed and args.sync_bn:
assert not args.split_bn
if has_apex and use_amp != 'native':
# Apex SyncBN preferred unless native amp is activated
model = convert_syncbn_model(model)
else:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
if args.global_rank == 0:
_logger.info(
'Converted model to use Synchronized BatchNorm. WARNING: You may have issues if using '
'zero initialized BN layers (enabled by default for ResNets) while sync-bn enabled.')
if args.torchscript:
assert not use_amp == 'apex', 'Cannot use APEX AMP with torchscripted model'
assert not args.sync_bn, 'Cannot use SyncBatchNorm with torchscripted model'
model = torch.jit.script(model)
optimizer = create_optimizer_v2(model, **optimizer_kwargs(cfg=args))
# setup automatic mixed-precision (AMP) loss scaling and op casting
amp_autocast = suppress # do nothing
loss_scaler = None
if use_amp == 'apex':
model, optimizer = amp.initialize(model, optimizer, opt_level='O1')
loss_scaler = ApexScaler()
if args.global_rank == 0:
_logger.info('Using NVIDIA APEX AMP. Training in mixed precision.')
elif use_amp == 'native':
amp_autocast = torch.cuda.amp.autocast
loss_scaler = NativeScaler()
if args.global_rank == 0:
_logger.info('Using native Torch AMP. Training in mixed precision.')
else:
if args.global_rank == 0:
_logger.info('AMP not enabled. Training in float32.')
# optionally resume from a checkpoint
resume_epoch = None
if args.resume:
resume_epoch = resume_checkpoint(
model, args.resume,
optimizer=None if args.no_resume_opt else optimizer,
loss_scaler=None if args.no_resume_opt else loss_scaler,
log_info=args.global_rank == 0)
# setup exponential moving average of model weights, SWA could be used here too
model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
model_ema = ModelEmaV2(
model, decay=args.model_ema_decay, device='cpu' if args.model_ema_force_cpu else None)
if args.resume:
load_checkpoint(model_ema.module, args.resume, use_ema=True)
# setup distributed training
if args.distributed:
if has_apex and use_amp != 'native':
# Apex DDP preferred unless native amp is activated
if args.global_rank == 0:
_logger.info("Using NVIDIA APEX DistributedDataParallel.")
model = ApexDDP(model, delay_allreduce=True)
else:
if args.global_rank == 0:
_logger.info("Using native Torch DistributedDataParallel.")
model = NativeDDP(model, device_ids=[args.local_rank]) # can use device str in Torch >= 1.1
# NOTE: EMA model does not need to be wrapped by DDP
# setup learning rate schedule and starting epoch
lr_scheduler, num_epochs = create_scheduler(args, optimizer)
start_epoch = 0
if args.start_epoch is not None:
# a specified start_epoch will always override the resume epoch
start_epoch = args.start_epoch
elif resume_epoch is not None:
start_epoch = resume_epoch
if lr_scheduler is not None and start_epoch > 0:
lr_scheduler.step(start_epoch)
if args.global_rank == 0:
_logger.info('Scheduled epochs: {}'.format(num_epochs))
# if needed, load dataset from torch
if args.dataset == 'CIFAR10':
args.data_dir = f'{args.data_dir}/cifar10_data'
elif args.dataset == 'CIFAR100':
args.data_dir = f'{args.data_dir}/cifar100_data'
# create the train and eval datasets
dataset_train = create_dataset(
args.dataset,
root=args.data_dir, split=args.train_split, is_training=True,
batch_size=args.batch_size, repeats=args.epoch_repeats)
dataset_eval = create_dataset(
args.dataset, root=args.data_dir, split=args.val_split, is_training=False, batch_size=args.batch_size)
# setup mixup / cutmix
collate_fn = None
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
mixup_args = dict(
mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
label_smoothing=args.smoothing, num_classes=args.num_classes)
if args.prefetcher:
assert not num_aug_splits # collate conflict (need to support deinterleaving in collate mixup)
collate_fn = FastCollateMixup(**mixup_args)
else:
mixup_fn = Mixup(**mixup_args)
# wrap dataset in AugMix helper
if num_aug_splits > 1:
dataset_train = AugMixDataset(dataset_train, num_splits=num_aug_splits)
    # create data loaders w/ augmentation pipeline
train_interpolation = args.train_interpolation
if args.no_aug or not train_interpolation:
train_interpolation = data_config['interpolation']
loader_train = create_loader(
dataset_train,
input_size=data_config['input_size'],
batch_size=args.batch_size,
is_training=True,
use_prefetcher=args.prefetcher,
no_aug=args.no_aug,
re_prob=args.reprob,
re_mode=args.remode,
re_count=args.recount,
re_split=args.resplit,
scale=args.scale,
ratio=args.ratio,
hflip=args.hflip,
vflip=args.vflip,
color_jitter=args.color_jitter,
auto_augment=args.aa,
num_aug_splits=num_aug_splits,
interpolation=train_interpolation,
mean=data_config['mean'],
std=data_config['std'],
num_workers=args.workers,
distributed=args.distributed,
collate_fn=collate_fn,
pin_memory=args.pin_mem,
use_multi_epochs_loader=args.use_multi_epochs_loader,
repeated_aug=args.repeated_aug,
)
loader_eval = create_loader(
dataset_eval,
input_size=data_config['input_size'],
batch_size=args.validation_batch_size_multiplier * args.batch_size,
is_training=False,
use_prefetcher=args.prefetcher,
interpolation=data_config['interpolation'],
mean=data_config['mean'],
std=data_config['std'],
num_workers=args.workers,
distributed=args.distributed,
crop_pct=data_config['crop_pct'],
pin_memory=args.pin_mem,
)
# setup loss function
if args.jsd:
assert num_aug_splits > 1 # JSD only valid with aug splits set
train_loss_fn = JsdCrossEntropy(num_splits=num_aug_splits, smoothing=args.smoothing).cuda()
elif mixup_active:
# smoothing is handled with mixup target transform
train_loss_fn = SoftTargetCrossEntropy().cuda()
elif args.smoothing:
train_loss_fn = LabelSmoothingCrossEntropy(smoothing=args.smoothing).cuda()
else:
train_loss_fn = nn.CrossEntropyLoss().cuda()
validate_loss_fn = nn.CrossEntropyLoss().cuda()
label_loss_fn = nn.BCEWithLogitsLoss().cuda()
# setup checkpoint saver and eval metric tracking
eval_metric = args.eval_metric
best_metric = None
best_epoch = None
saver = None
output_dir = None
if args.global_rank == 0:
if args.experiment:
exp_name = args.experiment
else:
exp_name = '-'.join([
datetime.now().strftime("%Y%m%d-%H%M%S"),
safe_model_name(args.model),
str(data_config['input_size'][-1])
])
output_dir = get_outdir(args.output if args.output else './output/train', exp_name)
decreasing = True if eval_metric == 'loss' else False
saver = CheckpointSaver(
model=model, optimizer=optimizer, args=args, model_ema=model_ema, amp_scaler=loss_scaler,
checkpoint_dir=output_dir, recovery_dir=output_dir, decreasing=decreasing, max_history=args.checkpoint_hist)
with open(os.path.join(output_dir, 'args.yaml'), 'w') as f:
f.write(args_text)
try:
for epoch in range(start_epoch, num_epochs):
if args.distributed and hasattr(loader_train.sampler, 'set_epoch'):
loader_train.sampler.set_epoch(epoch)
train_metrics = train_one_epoch(
epoch, model, loader_train, optimizer, train_loss_fn, label_loss_fn, args,
lr_scheduler=lr_scheduler, saver=saver, output_dir=output_dir,
amp_autocast=amp_autocast, loss_scaler=loss_scaler, model_ema=model_ema, mixup_fn=mixup_fn)
if args.distributed and args.dist_bn in ('broadcast', 'reduce'):
if args.global_rank == 0:
_logger.info("Distributing BatchNorm running means and vars")
distribute_bn(model, args.world_size, args.dist_bn == 'reduce')
eval_metrics = validate(model, loader_eval, validate_loss_fn, label_loss_fn, args, amp_autocast=amp_autocast)
if model_ema is not None and not args.model_ema_force_cpu:
if args.distributed and args.dist_bn in ('broadcast', 'reduce'):
distribute_bn(model_ema, args.world_size, args.dist_bn == 'reduce')
ema_eval_metrics = validate(
                    model_ema.module, loader_eval, validate_loss_fn, label_loss_fn, args, amp_autocast=amp_autocast, log_suffix=' (EMA)')
eval_metrics = ema_eval_metrics
if lr_scheduler is not None:
# step LR for next epoch
lr_scheduler.step(epoch + 1, eval_metrics[eval_metric])
if output_dir is not None and args.global_rank == 0:
update_summary(
epoch, train_metrics, eval_metrics, os.path.join(output_dir, 'summary.csv'),
write_header=best_metric is None, log_wandb=args.log_wandb and has_wandb)
if saver is not None:
# save proper checkpoint with eval metric
save_metric = eval_metrics[eval_metric]
best_metric, best_epoch = saver.save_checkpoint(epoch, metric=save_metric)
if args.pause is not None:
if epoch - start_epoch >= args.pause:
break
except KeyboardInterrupt:
pass
if best_metric is not None:
_logger.info('*** Best metric: {0} (epoch {1})'.format(best_metric, best_epoch))
def train_one_epoch(
epoch, model, loader, optimizer, loss_fn, label_loss_fn, args,
lr_scheduler=None, saver=None, output_dir=None, amp_autocast=suppress,
loss_scaler=None, model_ema=None, mixup_fn=None):
if args.mixup_off_epoch and epoch >= args.mixup_off_epoch:
if args.prefetcher and loader.mixup_enabled:
loader.mixup_enabled = False
elif mixup_fn is not None:
mixup_fn.mixup_enabled = False
second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
batch_time_m = AverageMeter()
data_time_m = AverageMeter()
losses_label_m = AverageMeter()
losses_class_m = AverageMeter()
losses_m = AverageMeter()
top1_label_m = AverageMeter()
if args.fake_separated_loss_log:
fake_losses_m = AverageMeter()
origin_losses_m = AverageMeter()
model.train()
end = time.time()
last_idx = len(loader) - 1
num_updates = epoch * len(loader)
    # TODO: use two loaders here and merge fake and original samples into input and target (postponed)
# import pdb; pdb.set_trace()
for batch_idx, (input, target) in enumerate(loader):
last_batch = batch_idx == last_idx
data_time_m.update(time.time() - end)
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if not mixup_active:
            # when mixup is off, target arrives as integer class labels (drop the mixup handling; label target)
target_label = (target < args.num_classes).to(dtype=torch.float32).unsqueeze(1)
target_class = target
else:
            # when mixup is on, target arrives as one-hot (soft) label vectors
target_class = target
            target_label = torch.sum(target, 1).unsqueeze(1)  # sum the real-class columns row-wise ([128, 1000] -> [128, 1]); 1 -> real, 0 -> fake
# if args.global_rank == 0 and batch_idx%200 == 0:
# print(f"target:{target.shape}")
# print(target)
# print(f"target_label:{target_label.shape}")
# print(target_label)
if not args.prefetcher:
input, target_class = input.cuda(), target_class.cuda()
if mixup_fn is not None:
input, target_class = mixup_fn(input, target_class)
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
with amp_autocast():
output, output_label = model(input)
# if args.fake_separated_loss_log:
# # calc loss splited with [0-999], [1000-1999]
# target_labels = torch.argmax(target, axis=1).cuda()
# fake_output = output[target_labels < args.num_classes//2]
# fake_target = target[target_labels < args.num_classes//2]
# origin_output = output[target_labels >= args.num_classes//2]
# origin_target = target[target_labels >= args.num_classes//2]
# fake_loss = loss_fn(fake_output, fake_target)
# origin_loss = loss_fn(origin_output, origin_target)
# if len(fake_target) == 0:
# fake_loss = torch.zeros(1, dtype=torch.float32).cuda()
# if len(origin_target) == 0:
# origin_loss = torch.zeros(1, dtype=torch.float32).cuda()
# if args.global_rank == 0 and (last_batch or batch_idx % args.log_interval == 0):
# print(f'fake_target_shape: {fake_target.shape}, origin_target_shape: {origin_target.shape}')
# print(f'fake_loss: {fake_loss}, origin_loss: {origin_loss}')
loss_class = loss_fn(output, target_class)
loss_label = label_loss_fn(output_label, target_label)
            # be careful with the batch sizes when mixing the two losses
# loss = (loss_class + loss_label)/2
            # with args.separate_rate = 1.0 the classification task and the fake-detection task are weighted equally in every backward pass; at 0.5 the fake-detection task counts half as much as classification
# rate_loss = (loss_class/loss_label).item()
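            # NOTE: the label loss is weighted by 0 below, so only the classification loss drives the backward pass and --separate-rate is effectively unused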
loss = (loss_class + 0*loss_label)
acc1_label = accuracy_label(output_label, target_label)
if not args.distributed:
losses_m.update(loss.item(), input.size(0))
losses_class_m.update(loss_class.item(), input.size(0))
losses_label_m.update(loss_label.item(), input.size(0))
top1_label_m.update(acc1_label.item(), output.size(0))
# if args.fake_separated_loss_log:
# if len(fake_target) > 0:
# fake_losses_m.update(fake_loss.item(), len(fake_target))
# if len(origin_target) > 0:
# origin_losses_m.update(origin_loss.item(), len(origin_target))
optimizer.zero_grad()
if loss_scaler is not None:
loss_scaler(
loss, optimizer,
clip_grad=args.clip_grad, clip_mode=args.clip_mode,
parameters=model_parameters(model, exclude_head='agc' in args.clip_mode),
create_graph=second_order)
else:
loss.backward(create_graph=second_order)
if args.clip_grad is not None:
dispatch_clip_grad(
model_parameters(model, exclude_head='agc' in args.clip_mode),
value=args.clip_grad, mode=args.clip_mode)
optimizer.step()
if model_ema is not None:
model_ema.update(model)
torch.cuda.synchronize()
if args.distributed:
reduced_loss = reduce_tensor(loss.data, args.world_size)
reduced_loss_class = reduce_tensor(loss_class.data, args.world_size)
reduced_loss_label = reduce_tensor(loss_label.data, args.world_size)
acc1_label = reduce_tensor(acc1_label, args.world_size)
losses_m.update(reduced_loss.item(), input.size(0))
losses_class_m.update(reduced_loss_class.item(), input.size(0))
losses_label_m.update(reduced_loss_label.item(), input.size(0))
top1_label_m.update(acc1_label.item(), output.size(0))
# if args.fake_separated_loss_log:
            # # len(fake_target) and len(origin_target) need to be summed across all processes before use
# fake_local_sum_loss = torch.tensor([len(fake_target)*fake_loss.item()], dtype=torch.float32).cuda()
# dist.all_reduce(fake_local_sum_loss.data, op=dist.ReduceOp.SUM)
# fake_nums = torch.tensor([len(fake_target)], dtype=torch.int64).cuda()
# dist.all_reduce(fake_nums.data, op=dist.ReduceOp.SUM)
# if fake_nums.item() > 0:
# fake_losses_m.update(fake_local_sum_loss.item()/fake_nums.item(), fake_nums.item())
# origin_local_sum_loss = torch.tensor([len(origin_target)*origin_loss.item()], dtype=torch.float32).cuda()
# dist.all_reduce(origin_local_sum_loss.data, op=dist.ReduceOp.SUM)
# origin_nums = torch.tensor([len(origin_target)], dtype=torch.int64).cuda()
# dist.all_reduce(origin_nums.data, op=dist.ReduceOp.SUM)
# if origin_nums.item() > 0:
# origin_losses_m.update(origin_local_sum_loss.item()/origin_nums.item(), origin_nums.item())
num_updates += 1
batch_time_m.update(time.time() - end)
if last_batch or batch_idx % args.log_interval == 0:
lrl = [param_group['lr'] for param_group in optimizer.param_groups]
lr = sum(lrl) / len(lrl)
if args.global_rank == 0:
_logger.info(
'Train: {} [{:>4d}/{} ({:>3.0f}%)] '
'Loss: {loss.val:>9.6f} ({loss.avg:>6.4f}) '
'Loss_Class: {loss_class.val:>7.4f} ({loss_class.avg:>6.4f}) '
'Loss_Label: {loss_label.val:>7.4f} ({loss_label.avg:>6.4f}) '
'Acc@label: {top1_label.val:>7.4f} ({top1_label.avg:>7.4f}) '
'Time: {batch_time.val:.3f}s, {rate:>7.2f}/s '
'({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) '
'LR: {lr:.3e} '
'Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format(
epoch,
batch_idx, len(loader),
100. * batch_idx / last_idx,
loss=losses_m,
loss_class=losses_class_m,
loss_label=losses_label_m,
top1_label=top1_label_m,
batch_time=batch_time_m,
rate=input.size(0) * args.world_size / batch_time_m.val,
rate_avg=input.size(0) * args.world_size / batch_time_m.avg,
lr=lr,
data_time=data_time_m))
if args.save_images and output_dir:
torchvision.utils.save_image(
input,
os.path.join(output_dir, 'train-batch-%d.jpg' % batch_idx),
padding=0,
normalize=True)
if saver is not None and args.recovery_interval and (
last_batch or (batch_idx + 1) % args.recovery_interval == 0):
saver.save_recovery(epoch, batch_idx=batch_idx)
if lr_scheduler is not None:
lr_scheduler.step_update(num_updates=num_updates, metric=losses_m.avg)
end = time.time()
# end for
if hasattr(optimizer, 'sync_lookahead'):
optimizer.sync_lookahead()
if args.fake_separated_loss_log:
return OrderedDict([('loss', losses_m.avg), ('fake_loss', fake_losses_m.avg), ('origin_loss', origin_losses_m.avg)])
else:
return OrderedDict([('loss', losses_m.avg), ('loss_class', losses_class_m.avg), ('loss_label', losses_label_m.avg), ('top1_label', top1_label_m.avg)])
def validate(model, loader, loss_fn, label_loss_fn, args, amp_autocast=suppress, log_suffix=''):
batch_time_m = AverageMeter()
losses_class_m = AverageMeter()
losses_label_m = AverageMeter()
top1_m = AverageMeter()
top5_m = AverageMeter()
top1_label_m = AverageMeter()
model.eval()
end = time.time()
last_idx = len(loader) - 1
with torch.no_grad():
for batch_idx, (input, target) in enumerate(loader):
last_batch = batch_idx == last_idx
if not args.prefetcher:
input = input.cuda()
target = target.cuda()
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
target_label = (target < args.num_classes).to(dtype=torch.float32).unsqueeze(1)
target_class = target
# if args.global_rank == 0 and batch_idx == 0:
# print(f"target:{target_class.shape}")
# print(target_class)
# print(f"target_label:{target_label.shape}")
# print(target_label)
with amp_autocast():
output, output_label = model(input)
if isinstance(output, (tuple, list)):
output = output[0]
# augmentation reduction
reduce_factor = args.tta
if reduce_factor > 1:
output = output.unfold(0, reduce_factor, reduce_factor).mean(dim=2)
output_label = output_label.unfold(0, reduce_factor, reduce_factor).mean(dim=2)
target_class = target_class[0:target.size(0):reduce_factor]
target_label = target_label[0:target_label.size(0):reduce_factor]
loss_class = loss_fn(output, target_class)
loss_label = label_loss_fn(output_label, target_label)
acc1, acc5 = accuracy(output, target_class, topk=(1, 5))
acc1_label = accuracy_label(output_label, target_label)
if args.distributed:
reduced_loss_class = reduce_tensor(loss_class.data, args.world_size)
reduced_loss_label = reduce_tensor(loss_label.data, args.world_size)
acc1 = reduce_tensor(acc1, args.world_size)
acc5 = reduce_tensor(acc5, args.world_size)
acc1_label = reduce_tensor(acc1_label, args.world_size)
else:
reduced_loss_class = loss_class.data
reduced_loss_label = loss_label.data
torch.cuda.synchronize()
losses_class_m.update(reduced_loss_class.item(), input.size(0))
losses_label_m.update(reduced_loss_label.item(), input.size(0))
top1_m.update(acc1.item(), output.size(0))
top5_m.update(acc5.item(), output.size(0))
top1_label_m.update(acc1_label.item(), output.size(0))
batch_time_m.update(time.time() - end)
end = time.time()
if args.global_rank == 0 and (last_batch or batch_idx % args.log_interval == 0):
log_name = 'Test' + log_suffix
_logger.info(
'{0}: [{1:>4d}/{2}] '
'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) '
'Loss_Class: {loss_class.val:>7.4f} ({loss_class.avg:>6.4f}) '
'Loss_Label: {loss_label.val:>7.4f} ({loss_label.avg:>6.4f}) '
'Acc@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) '
'Acc@5: {top5.val:>7.4f} ({top5.avg:>7.4f}) '
'Acc@label: {top1_label.val:>7.4f} ({top1_label.avg:>7.4f})'.format(
log_name, batch_idx, last_idx, batch_time=batch_time_m,
loss_class=losses_class_m, loss_label=losses_label_m, top1=top1_m, top5=top5_m, top1_label=top1_label_m))
metrics = OrderedDict([('loss_class', losses_class_m.avg), ('loss_label', losses_label_m.avg), ('top1', top1_m.avg), ('top5', top5_m.avg), ('top1_label', top1_label_m.avg)])
return metrics
if __name__ == '__main__':
main()
| [
"torch.cuda.synchronize",
"torch.nn.SyncBatchNorm.convert_sync_batchnorm",
"torch.distributed.init_process_group",
"torch.no_grad",
"torch.nn.parallel.DistributedDataParallel",
"torch.cuda.device_count",
"torch.cuda.set_device",
"torch.nn.BCEWithLogitsLoss",
"torch.jit.script",
"torch.nn.CrossEntropyLoss",
"torch.sum"
] | 1.4.0 | Daweek/Edgar-pytorch-image-models | ca3398c64a8fb8b2e01f7057679440ff6a6c6672 |
1.0 | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import warnings
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, NewType, Optional, Tuple, Union
import torch
from torch.nn.utils.rnn import pad_sequence
from ..tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTrainedTokenizerBase
InputDataClass = NewType("InputDataClass", Any)
"""
A DataCollator is a function that takes a list of samples from a Dataset and collates them into a batch, as a dictionary
of Tensors.
"""
DataCollator = NewType("DataCollator", Callable[[List[InputDataClass]], Dict[str, torch.Tensor]])
def default_data_collator(features: List[InputDataClass]) -> Dict[str, torch.Tensor]:
"""
Very simple data collator that simply collates batches of dict-like objects and performs special handling for
potential keys named:
- ``label``: handles a single value (int or float) per object
- ``label_ids``: handles a list of values per object
Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs
    to the model. See glue and ner for examples of how it's useful.
"""
# In this function we'll make the assumption that all `features` in the batch
# have the same attributes.
# So we will look at the first element as a proxy for what attributes exist
# on the whole batch.
if not isinstance(features[0], (dict, BatchEncoding)):
features = [vars(f) for f in features]
first = features[0]
batch = {}
# Special handling for labels.
# Ensure that tensor is created with the correct type
# (it should be automatically the case, but let's make sure of it.)
if "label" in first and first["label"] is not None:
label = first["label"].item() if isinstance(first["label"], torch.Tensor) else first["label"]
dtype = torch.long if isinstance(label, int) else torch.float
batch["labels"] = torch.tensor([f["label"] for f in features], dtype=dtype)
elif "label_ids" in first and first["label_ids"] is not None:
if isinstance(first["label_ids"], torch.Tensor):
batch["labels"] = torch.stack([f["label_ids"] for f in features])
else:
dtype = torch.long if type(first["label_ids"][0]) is int else torch.float
batch["labels"] = torch.tensor([f["label_ids"] for f in features], dtype=dtype)
# Handling of all other possible keys.
# Again, we will use the first element to figure out which key/values are not None for this model.
for k, v in first.items():
if k not in ("label", "label_ids") and v is not None and not isinstance(v, str):
if isinstance(v, torch.Tensor):
batch[k] = torch.stack([f[k] for f in features])
else:
batch[k] = torch.tensor([f[k] for f in features])
return batch
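# Illustrative sketch (not part of the original module): how `default_data_collator`
# stacks tensor features and builds the "labels" entry from plain integer labels. The
# feature values below are made up for demonstration.
def _example_default_data_collator():
    features = [
        {"input_ids": torch.tensor([101, 2009, 102]), "label": 1},
        {"input_ids": torch.tensor([101, 2003, 102]), "label": 0},
    ]
    batch = default_data_collator(features)
    # batch["input_ids"] has shape (2, 3); batch["labels"] is tensor([1, 0]) with dtype torch.long.
    return batch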
@dataclass
class DataCollatorWithPadding:
"""
Data collator that will dynamically pad the inputs received.
Args:
tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):
The tokenizer used for encoding the data.
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
              sequence is provided).
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
maximum acceptable input length for the model if that argument is not provided.
* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
different lengths).
max_length (:obj:`int`, `optional`):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (:obj:`int`, `optional`):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
"""
tokenizer: PreTrainedTokenizerBase
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
batch = self.tokenizer.pad(
features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors="pt",
)
if "label" in batch:
batch["labels"] = batch["label"]
del batch["label"]
if "label_ids" in batch:
batch["labels"] = batch["label_ids"]
del batch["label_ids"]
return batch
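# Illustrative usage sketch (not part of the original module): pad two encoded sentences
# to a common length. It assumes the `transformers` package and the "bert-base-uncased"
# checkpoint are available; both are assumptions made only for this example.
def _example_data_collator_with_padding():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    collator = DataCollatorWithPadding(tokenizer)
    features = [tokenizer("a short sentence"), tokenizer("a slightly longer example sentence")]
    batch = collator(features)
    # Every tensor in `batch` now shares the length of the longest sequence in the batch.
    return batch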
@dataclass
class DataCollatorForTokenClassification:
"""
Data collator that will dynamically pad the inputs received, as well as the labels.
Args:
tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):
The tokenizer used for encoding the data.
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
              sequence is provided).
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
maximum acceptable input length for the model if that argument is not provided.
* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
different lengths).
max_length (:obj:`int`, `optional`):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (:obj:`int`, `optional`):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
label_pad_token_id (:obj:`int`, `optional`, defaults to -100):
            The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
"""
tokenizer: PreTrainedTokenizerBase
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
label_pad_token_id: int = -100
def __call__(self, features):
label_name = "label" if "label" in features[0].keys() else "labels"
labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
batch = self.tokenizer.pad(
features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
# Conversion to tensors will fail if we have labels as they are not of the same length yet.
return_tensors="pt" if labels is None else None,
)
if labels is None:
return batch
sequence_length = torch.tensor(batch["input_ids"]).shape[1]
padding_side = self.tokenizer.padding_side
if padding_side == "right":
batch["labels"] = [label + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels]
else:
batch["labels"] = [[self.label_pad_token_id] * (sequence_length - len(label)) + label for label in labels]
batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
return batch
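# Illustrative sketch (not part of the original module): the label-padding arithmetic used
# above when the tokenizer pads on the right. The label values are made up for demonstration.
def _example_token_classification_label_padding():
    sequence_length = 5
    label_pad_token_id = -100
    labels = [[0, 1], [2, 2, 1]]
    padded = [label + [label_pad_token_id] * (sequence_length - len(label)) for label in labels]
    # padded == [[0, 1, -100, -100, -100], [2, 2, 1, -100, -100]]
    return padded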
def _collate_batch(examples, tokenizer):
"""Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""
# Tensorize if necessary.
if isinstance(examples[0], (list, tuple)):
examples = [torch.tensor(e, dtype=torch.long) for e in examples]
# Check if padding is necessary.
length_of_first = examples[0].size(0)
are_tensors_same_length = all(x.size(0) == length_of_first for x in examples)
if are_tensors_same_length:
return torch.stack(examples, dim=0)
    # Padding is needed, so check that the tokenizer has a `pad_token` we can use.
if tokenizer._pad_token is None:
raise ValueError(
"You are attempting to pad samples but the tokenizer you are using"
f" ({tokenizer.__class__.__name__}) does not have a pad token."
)
# Creating the full tensor and filling it with our data.
max_length = max(x.size(0) for x in examples)
result = examples[0].new_full([len(examples), max_length], tokenizer.pad_token_id)
for i, example in enumerate(examples):
if tokenizer.padding_side == "right":
result[i, : example.shape[0]] = example
else:
result[i, -example.shape[0] :] = example
return result
def tolist(x: Union[List[Any], torch.Tensor]):
return x.tolist() if isinstance(x, torch.Tensor) else x
@dataclass
class DataCollatorForSeq2Seq:
"""
Data collator that will dynamically pad the inputs received, as well as the labels.
Args:
tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):
The tokenizer used for encoding the data.
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
maximum acceptable input length for the model if that argument is not provided.
* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
different lengths).
max_length (:obj:`int`, `optional`):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (:obj:`int`, `optional`):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
label_pad_token_id (:obj:`int`, `optional`, defaults to -100):
The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
"""
tokenizer: PreTrainedTokenizerBase
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
label_pad_token_id: int = -100
def __call__(self, features):
labels = [feature["labels"] for feature in features] if "labels" in features[0].keys() else None
# We have to pad the labels before calling `tokenizer.pad` as this method won't pad them and needs them of the
# same length to return tensors.
if labels is not None:
max_label_length = max(len(l) for l in labels)
padding_side = self.tokenizer.padding_side
for feature in features:
remainder = [self.label_pad_token_id] * (max_label_length - len(feature["labels"]))
feature["labels"] = (
feature["labels"] + remainder if padding_side == "right" else remainder + feature["labels"]
)
return self.tokenizer.pad(
features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors="pt",
)
@dataclass
class DataCollatorForLanguageModeling:
"""
Data collator used for language modeling. Inputs are dynamically padded to the maximum length of a batch if they
are not all of the same length.
Args:
tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):
The tokenizer used for encoding the data.
mlm (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to use masked language modeling. If set to :obj:`False`, the labels are the same as the
inputs with the padding tokens ignored (by setting them to -100). Otherwise, the labels are -100 for
non-masked tokens and the value to predict for the masked token.
mlm_probability (:obj:`float`, `optional`, defaults to 0.15):
The probability with which to (randomly) mask tokens in the input, when :obj:`mlm` is set to :obj:`True`.
.. note::
For best performance, this data collator should be used with a dataset having items that are dictionaries or
BatchEncoding, with the :obj:`"special_tokens_mask"` key, as returned by a
:class:`~transformers.PreTrainedTokenizer` or a :class:`~transformers.PreTrainedTokenizerFast` with the
argument :obj:`return_special_tokens_mask=True`.
"""
tokenizer: PreTrainedTokenizerBase
mlm: bool = True
mlm_probability: float = 0.15
def __post_init__(self):
if self.mlm and self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for masked language modeling. "
"You should pass `mlm=False` to train on causal language modeling instead."
)
def __call__(
self, examples: List[Union[List[int], torch.Tensor, Dict[str, torch.Tensor]]]
) -> Dict[str, torch.Tensor]:
# Handle dict or lists with proper padding and conversion to tensor.
if isinstance(examples[0], (dict, BatchEncoding)):
batch = self.tokenizer.pad(examples, return_tensors="pt")
else:
batch = {"input_ids": _collate_batch(examples, self.tokenizer)}
# If special token mask has been preprocessed, pop it from the dict.
special_tokens_mask = batch.pop("special_tokens_mask", None)
if self.mlm:
batch["input_ids"], batch["labels"] = self.mask_tokens(
batch["input_ids"], special_tokens_mask=special_tokens_mask
)
else:
labels = batch["input_ids"].clone()
if self.tokenizer.pad_token_id is not None:
labels[labels == self.tokenizer.pad_token_id] = -100
batch["labels"] = labels
return batch
def mask_tokens(
self, inputs: torch.Tensor, special_tokens_mask: Optional[torch.Tensor] = None
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
"""
labels = inputs.clone()
# We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
probability_matrix = torch.full(labels.shape, self.mlm_probability)
if special_tokens_mask is None:
special_tokens_mask = [
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
]
special_tokens_mask = torch.tensor(special_tokens_mask, dtype=torch.bool)
else:
special_tokens_mask = special_tokens_mask.bool()
probability_matrix.masked_fill_(special_tokens_mask, value=0.0)
masked_indices = torch.bernoulli(probability_matrix).bool()
labels[~masked_indices] = -100 # We only compute loss on masked tokens
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
# 10% of the time, we replace masked input tokens with random word
indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels
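# Worked example (illustrative note, not part of the original module): with the default
# mlm_probability of 0.15, the expected per-token outcomes of `mask_tokens` are roughly
#   selected for prediction:          0.15
#   replaced with the mask token:     0.15 * 0.8       = 0.12
#   replaced with a random token:     0.15 * 0.2 * 0.5 = 0.015
#   kept unchanged but still scored:  0.15 * 0.2 * 0.5 = 0.015
# Tokens that are not selected keep their input ids and receive a label of -100, so the
# cross-entropy loss ignores them.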
@dataclass
class DataCollatorForWholeWordMask(DataCollatorForLanguageModeling):
"""
    Data collator used for whole word masked language modeling.
- collates batches of tensors, honoring their tokenizer's pad_token
- preprocesses batches for masked language modeling
"""
def __call__(
self, examples: List[Union[List[int], torch.Tensor, Dict[str, torch.Tensor]]]
) -> Dict[str, torch.Tensor]:
if isinstance(examples[0], (dict, BatchEncoding)):
input_ids = [e["input_ids"] for e in examples]
else:
input_ids = examples
examples = [{"input_ids": e} for e in examples]
batch_input = _collate_batch(input_ids, self.tokenizer)
mask_labels = []
for e in examples:
ref_tokens = []
for id in tolist(e["input_ids"]):
token = self.tokenizer._convert_id_to_token(id)
ref_tokens.append(token)
            # For Chinese tokens, we need extra info to mark sub-words, e.g. [喜,欢] -> [喜,##欢]
if "chinese_ref" in e:
ref_pos = tolist(e["chinese_ref"])
len_seq = e["input_ids"].size(0)
for i in range(len_seq):
if i in ref_pos:
ref_tokens[i] = "##" + ref_tokens[i]
mask_labels.append(self._whole_word_mask(ref_tokens))
batch_mask = _collate_batch(mask_labels, self.tokenizer)
inputs, labels = self.mask_tokens(batch_input, batch_mask)
return {"input_ids": inputs, "labels": labels}
def _whole_word_mask(self, input_tokens: List[str], max_predictions=512):
"""
        Get 0/1 labels for masked tokens using a whole-word-mask proxy
"""
cand_indexes = []
for (i, token) in enumerate(input_tokens):
if token == "[CLS]" or token == "[SEP]":
continue
if len(cand_indexes) >= 1 and token.startswith("##"):
cand_indexes[-1].append(i)
else:
cand_indexes.append([i])
random.shuffle(cand_indexes)
num_to_predict = min(max_predictions, max(1, int(round(len(input_tokens) * self.mlm_probability))))
masked_lms = []
covered_indexes = set()
for index_set in cand_indexes:
if len(masked_lms) >= num_to_predict:
break
# If adding a whole-word mask would exceed the maximum number of
# predictions, then just skip this candidate.
if len(masked_lms) + len(index_set) > num_to_predict:
continue
is_any_index_covered = False
for index in index_set:
if index in covered_indexes:
is_any_index_covered = True
break
if is_any_index_covered:
continue
for index in index_set:
covered_indexes.add(index)
masked_lms.append(index)
assert len(covered_indexes) == len(masked_lms)
mask_labels = [1 if i in covered_indexes else 0 for i in range(len(input_tokens))]
return mask_labels
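    # Illustrative sketch (not part of the original module): how word pieces are grouped
    # into whole-word candidates before masking. The token strings are made-up examples.
    #   input_tokens = ["[CLS]", "un", "##believ", "##able", "story", "[SEP]"]
    #   cand_indexes = [[1, 2, 3], [4]]   # special tokens skipped, "##" pieces merged
    # mask_labels then carries a 1 at every position of each whole word that is selected,
    # subject to the num_to_predict budget, and a 0 everywhere else.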
def mask_tokens(self, inputs: torch.Tensor, mask_labels: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Set
        'mask_labels' means we use whole word masking (wwm); we mask indices directly according to its reference.
"""
if self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer."
)
labels = inputs.clone()
# We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)
probability_matrix = mask_labels
special_tokens_mask = [
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
]
probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
if self.tokenizer._pad_token is not None:
padding_mask = labels.eq(self.tokenizer.pad_token_id)
probability_matrix.masked_fill_(padding_mask, value=0.0)
masked_indices = probability_matrix.bool()
labels[~masked_indices] = -100 # We only compute loss on masked tokens
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
# 10% of the time, we replace masked input tokens with random word
indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels
@dataclass
class DataCollatorForSOP(DataCollatorForLanguageModeling):
"""
Data collator used for sentence order prediction task.
- collates batches of tensors, honoring their tokenizer's pad_token
- preprocesses batches for both masked language modeling and sentence order prediction
"""
def __init__(self, *args, **kwargs):
warnings.warn(
"DataCollatorForSOP is deprecated and will be removed in a future version, you can now use "
"DataCollatorForLanguageModeling instead.",
FutureWarning,
)
def __call__(self, examples: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
input_ids = [example["input_ids"] for example in examples]
input_ids = _collate_batch(input_ids, self.tokenizer)
input_ids, labels, attention_mask = self.mask_tokens(input_ids)
token_type_ids = [example["token_type_ids"] for example in examples]
        # the size of segment_ids varies because of randomness; pad zeros to the end as in the original implementation
token_type_ids = pad_sequence(token_type_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id)
sop_label_list = [example["sentence_order_label"] for example in examples]
sentence_order_label = torch.stack(sop_label_list)
return {
"input_ids": input_ids,
"labels": labels,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
"sentence_order_label": sentence_order_label,
}
def mask_tokens(self, inputs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Prepare masked tokens inputs/labels/attention_mask for masked language modeling: 80% MASK, 10% random, 10%
original. N-gram not applied yet.
"""
if self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer."
)
labels = inputs.clone()
# We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)
probability_matrix = torch.full(labels.shape, self.mlm_probability)
special_tokens_mask = [
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
]
probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
if self.tokenizer._pad_token is not None:
padding_mask = labels.eq(self.tokenizer.pad_token_id)
probability_matrix.masked_fill_(padding_mask, value=0.0)
masked_indices = torch.bernoulli(probability_matrix).bool()
        # the probability is `1` for masked positions; however, in the ALBERT model the attention mask uses `0` for masked positions, so invert the value
attention_mask = (~masked_indices).float()
if self.tokenizer._pad_token is not None:
attention_padding_mask = labels.eq(self.tokenizer.pad_token_id)
attention_mask.masked_fill_(attention_padding_mask, value=1.0)
labels[~masked_indices] = -100 # We only compute loss on masked tokens, -100 is default for CE compute
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
# 10% of the time, we replace masked input tokens with random word
indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels, attention_mask
@dataclass
class DataCollatorForPermutationLanguageModeling:
"""
Data collator used for permutation language modeling.
- collates batches of tensors, honoring their tokenizer's pad_token
- preprocesses batches for permutation language modeling with procedures specific to XLNet
"""
tokenizer: PreTrainedTokenizerBase
plm_probability: float = 1 / 6
max_span_length: int = 5 # maximum length of a span of masked tokens
def __call__(
self, examples: List[Union[List[int], torch.Tensor, Dict[str, torch.Tensor]]]
) -> Dict[str, torch.Tensor]:
if isinstance(examples[0], (dict, BatchEncoding)):
examples = [e["input_ids"] for e in examples]
batch = _collate_batch(examples, self.tokenizer)
inputs, perm_mask, target_mapping, labels = self.mask_tokens(batch)
return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels}
def mask_tokens(self, inputs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""
The masked tokens to be predicted for a particular sequence are determined by the following algorithm:
0. Start from the beginning of the sequence by setting ``cur_len = 0`` (number of tokens processed so far).
1. Sample a ``span_length`` from the interval ``[1, max_span_length]`` (length of span of tokens to be
masked)
2. Reserve a context of length ``context_length = span_length / plm_probability`` to surround span to be
masked
3. Sample a starting point ``start_index`` from the interval ``[cur_len, cur_len + context_length -
span_length]`` and mask tokens ``start_index:start_index + span_length``
4. Set ``cur_len = cur_len + context_length``. If ``cur_len < max_len`` (i.e. there are tokens remaining in
the sequence to be processed), repeat from Step 1.
"""
if self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for permutation language modeling. Please add a mask token if you want to use this tokenizer."
)
if inputs.size(1) % 2 != 0:
raise ValueError(
"This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see relevant comments in source code for details."
)
labels = inputs.clone()
# Creating the mask and target_mapping tensors
masked_indices = torch.full(labels.shape, 0, dtype=torch.bool)
target_mapping = torch.zeros((labels.size(0), labels.size(1), labels.size(1)), dtype=torch.float32)
for i in range(labels.size(0)):
# Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
cur_len = 0
max_len = labels.size(1)
while cur_len < max_len:
# Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
span_length = torch.randint(1, self.max_span_length + 1, (1,)).item()
# Reserve a context of length `context_length = span_length / plm_probability` to surround the span to be masked
context_length = int(span_length / self.plm_probability)
# Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length - span_length]` and mask tokens `start_index:start_index + span_length`
start_index = cur_len + torch.randint(context_length - span_length + 1, (1,)).item()
masked_indices[i, start_index : start_index + span_length] = 1
# Set `cur_len = cur_len + context_length`
cur_len += context_length
# Since we're replacing non-masked tokens with -100 in the labels tensor instead of skipping them altogether,
            # the i-th prediction corresponds to the i-th token.
target_mapping[i] = torch.eye(labels.size(1))
special_tokens_mask = torch.tensor(
[self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()],
dtype=torch.bool,
)
masked_indices.masked_fill_(special_tokens_mask, value=0.0)
if self.tokenizer._pad_token is not None:
padding_mask = labels.eq(self.tokenizer.pad_token_id)
masked_indices.masked_fill_(padding_mask, value=0.0)
# Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc.
non_func_mask = ~(padding_mask | special_tokens_mask)
inputs[masked_indices] = self.tokenizer.mask_token_id
labels[~masked_indices] = -100 # We only compute loss on masked tokens
perm_mask = torch.zeros((labels.size(0), labels.size(1), labels.size(1)), dtype=torch.float32)
for i in range(labels.size(0)):
# Generate permutation indices i.e. sample a random factorisation order for the sequence. This will
# determine which tokens a given token can attend to (encoded in `perm_mask`).
# Note: Length of token sequence being permuted has to be less than or equal to reused sequence length
# (see documentation for `mems`), otherwise information may leak through due to reuse. In this implementation,
# we assume that reused length is half of sequence length and permutation length is equal to reused length.
# This requires that the sequence length be even.
# Create a linear factorisation order
perm_index = torch.arange(labels.size(1))
# Split this into two halves, assuming that half the sequence is reused each time
perm_index = perm_index.reshape((-1, labels.size(1) // 2)).transpose(0, 1)
# Permute the two halves such that they do not cross over
perm_index = perm_index[torch.randperm(labels.size(1) // 2)]
# Flatten this out into the desired permuted factorisation order
perm_index = torch.flatten(perm_index.transpose(0, 1))
# Set the permutation indices of non-masked (non-functional) tokens to the
# smallest index (-1) so that:
# (1) They can be seen by all other positions
# (2) They cannot see masked positions, so there won't be information leak
perm_index.masked_fill_(~masked_indices[i] & non_func_mask[i], -1)
# The logic for whether the i-th token can attend on the j-th token based on the factorisation order:
# 0 (can attend): If perm_index[i] > perm_index[j] or j is neither masked nor a functional token
# 1 (cannot attend): If perm_index[i] <= perm_index[j] and j is either masked or a functional token
perm_mask[i] = (
perm_index.reshape((labels.size(1), 1)) <= perm_index.reshape((1, labels.size(1)))
) & masked_indices[i]
return inputs.long(), perm_mask, target_mapping, labels.long()
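# Worked example (illustrative note, not part of the original module): with the default
# plm_probability of 1/6 and a sampled span_length of 4, the reserved context is
# context_length = int(4 / (1/6)) = 24 tokens, of which exactly 4 are masked, so on
# average about one token in six ends up masked. In the returned perm_mask,
# perm_mask[i, j, k] == 1 means that in sequence i position j cannot attend to position k.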
| [
"torch.stack",
"torch.nn.utils.rnn.pad_sequence",
"torch.randint",
"torch.full",
"torch.tensor",
"torch.bernoulli"
] | 1.0 | suakow/transformers | 0ec3619bb2c212737d2472cccaf6658317d2bfa1 |
1.0 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Fine-tuning the library models for named entity recognition on CoNLL-2003. """
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
task_type: Optional[str] = field(
default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
data_dir: str = field(
metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
)
labels: Optional[str] = field(
default=None,
metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
)
max_seq_length: int = field(
default=128,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
)
module = import_module("tasks")
try:
token_classification_task_clazz = getattr(module, model_args.task_type)
token_classification_task: TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
training_args.local_rank,
training_args.device,
training_args.n_gpu,
bool(training_args.local_rank != -1),
training_args.fp16,
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed)
# Prepare CONLL-2003 task
labels = token_classification_task.get_labels(data_args.labels)
label_map: Dict[int, str] = {i: label for i, label in enumerate(labels)}
num_labels = len(labels)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
id2label=label_map,
label2id={label: i for i, label in enumerate(labels)},
cache_dir=model_args.cache_dir,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast,
)
model = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
# Get datasets
train_dataset = (
TokenClassificationDataset(
token_classification_task=token_classification_task,
data_dir=data_args.data_dir,
tokenizer=tokenizer,
labels=labels,
model_type=config.model_type,
max_seq_length=data_args.max_seq_length,
overwrite_cache=data_args.overwrite_cache,
mode=Split.train,
)
if training_args.do_train
else None
)
eval_dataset = (
TokenClassificationDataset(
token_classification_task=token_classification_task,
data_dir=data_args.data_dir,
tokenizer=tokenizer,
labels=labels,
model_type=config.model_type,
max_seq_length=data_args.max_seq_length,
overwrite_cache=data_args.overwrite_cache,
mode=Split.dev,
)
if training_args.do_eval
else None
)
def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
preds = np.argmax(predictions, axis=2)
batch_size, seq_len = preds.shape
out_label_list = [[] for _ in range(batch_size)]
preds_list = [[] for _ in range(batch_size)]
for i in range(batch_size):
for j in range(seq_len):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
return preds_list, out_label_list
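    # Illustrative note (not part of the original script): label ids equal to
    # nn.CrossEntropyLoss().ignore_index (-100) mark special tokens and word-piece
    # continuations, so align_predictions drops those positions and seqeval scores only
    # the sub-tokens that carry real labels.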
def compute_metrics(p: EvalPrediction) -> Dict:
preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
return {
"accuracy_score": accuracy_score(out_label_list, preds_list),
"precision": precision_score(out_label_list, preds_list),
"recall": recall_score(out_label_list, preds_list),
"f1": f1_score(out_label_list, preds_list),
}
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
compute_metrics=compute_metrics,
)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
result = trainer.evaluate()
output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
if trainer.is_world_process_zero():
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key, value in result.items():
logger.info(" %s = %s", key, value)
writer.write("%s = %s\n" % (key, value))
results.update(result)
# Predict
if training_args.do_predict:
test_dataset = TokenClassificationDataset(
token_classification_task=token_classification_task,
data_dir=data_args.data_dir,
tokenizer=tokenizer,
labels=labels,
model_type=config.model_type,
max_seq_length=data_args.max_seq_length,
overwrite_cache=data_args.overwrite_cache,
mode=Split.test,
)
predictions, label_ids, metrics = trainer.predict(test_dataset)
preds_list, _ = align_predictions(predictions, label_ids)
output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
if trainer.is_world_process_zero():
with open(output_test_results_file, "w") as writer:
for key, value in metrics.items():
logger.info(" %s = %s", key, value)
writer.write("%s = %s\n" % (key, value))
# Save predictions
output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
if trainer.is_world_process_zero():
with open(output_test_predictions_file, "w") as writer:
with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
token_classification_task.write_predictions_to_file(writer, f, preds_list)
return results
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| [
"torch.nn.CrossEntropyLoss"
] | 1.0 | suakow/transformers | 0ec3619bb2c212737d2472cccaf6658317d2bfa1 |
1.7 | import os
import sys
sys.path.append(os.path.abspath(".") + "/../")
sys.path.append(os.path.abspath("."))
import clip
import torch
import json
from PIL import Image
# Load the model
device = "cuda" if torch.cuda.is_available() else "cpu"
#model, preprocess = clip.load('ViT-B/32', device)
# Huawei Cloud project
model, preprocess = clip.load('/home/featurize/work/CLIP/save/CLIP-concept-finetune.ckpt', device, jit=False)
#model, preprocess = clip.load('/home/featurize/work/CLIP/save/checkpoints_new/CLIP-concept-finetune_train_20.ckpt', device, jit=False)
#model, preprocess = clip.load('/home/featurize/work/CLIP/save/contrastive_learning_checkpoints/CLIP-concept-finetune_clustering_modified_classified_0_8_xueyao_finetune.ckpt', device, jit=False)
def load_zh_en_concepts(infilename):
concept_dict = {}
with open(infilename, "r", encoding = "utf-8") as infile:
for line in infile:
line_list = line.strip("\n").split("\t")
if len(line_list) >= 2:
concept_dict[line_list[0]] = line_list[1]
return concept_dict
def load_concept_image_pair(infilename):
image_conept_pair = {}
with open(infilename, "r", encoding = "utf-8") as infile:
line = infile.readline()
concept_dict = json.loads(line.strip())
for key, value_list in concept_dict.items():
for value in value_list:
if str(value) not in image_conept_pair:
image_conept_pair[str(value)] = key
return image_conept_pair
def load_common_concepts(infilename):
concept_dict = {}
with open(infilename, "r", encoding = "utf-8") as infile:
for line in infile:
line = line.strip("\n")
if line not in concept_dict:
concept_dict[line] = None
return concept_dict
def main():
topK = 50
concepts_dict = load_zh_en_concepts("validates/zh_en_concepts.txt")
concepts_list = concepts_dict.keys()
image_concept_pair = load_concept_image_pair("validates/concept_image.json")
#for image, concept in image_concept_pair.items():
# print (image, concept)
#validate_en_concepts_dict = load_common_concepts("validate_concepts.txt")
validate_en_concepts_dict = load_common_concepts("human_validate_concepts.txt")
pos_label_count = 0
neg_label_count = 0
for key, value in image_concept_pair.items():
#print (key, value)
image_fielpath_jpg = "validates/images/" + key + ".jpg"
image_fielpath_png = "validates/images/" + key + ".png"
if os.path.isfile(image_fielpath_jpg):
image_filepath = image_fielpath_jpg
elif os.path.isfile(image_fielpath_png):
image_filepath = image_fielpath_png
else:
print ("Path not exist.")
continue
concept = value
#print (image_filepath, concept)
image = Image.open(image_filepath)
image_input = preprocess(image).unsqueeze(0).to(device)
        # ensure the concept is in the validation dataset
if concepts_dict[concept] not in validate_en_concepts_dict:
print ("concept %s-%s not in validate concept dataset" % (concept, concepts_dict[concept]))
continue
# Prepare the inputs
candidate_concept = [concepts_dict[concept]] + [concepts_dict[ele] for ele in concepts_list if ele != concept]
#print (candidate_concept[0:topK])
text_inputs = torch.cat([clip.tokenize(f"a photo of a {c}") for c in candidate_concept]).to(device)
# Calculate features
with torch.no_grad():
image_features = model.encode_image(image_input)
text_features = model.encode_text(text_inputs)
        # Pick the top-K most similar labels for the image
image_features /= image_features.norm(dim=-1, keepdim=True)
text_features /= text_features.norm(dim=-1, keepdim=True)
similarity = (100.0 * image_features @ text_features.T).softmax(dim=-1)
values, indices = similarity[0].topk(topK)
#values, indices = similarity[0].topk(50)
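        # Descriptive note (added for clarity, not part of the original script): the block
        # below cuts the ranked list at the first score that falls below 1/20 of the top
        # score; if every score stays above that threshold, longest_index remains -1 and
        # the slice keeps all but the last entry.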
longest_index = 0
ratio = values[0]/20.0
gap_flag = False
for index in range(len(values)):
if values[index] < ratio:
longest_index = index
gap_flag = True
break
if gap_flag == False:
longest_index = -1
values, indices = values[:longest_index], indices[:longest_index]
#print (values, indices)
# Print the result
#print("\nTop predictions:\n")
#for value, index in zip(values, indices):
# print(f"{candidate_concept[index]:>16s}: {100 * value.item():.2f}%")
print ("ground: image_path=%s, concept=%s" % (image_filepath, concepts_dict[concept]))
#print (indices)
print ("predict: concept=%s" % candidate_concept[indices[0]])
result_concept_set = set()
for index in indices:
result_concept_set.add(candidate_concept[index])
if concepts_dict[concept] in result_concept_set:
#print ("yes")
pos_label_count += 1
else:
#print ("no")
neg_label_count += 1
print ("pos_label_count=", pos_label_count)
print ("neg_label_count=", neg_label_count)
total_label_count = pos_label_count + neg_label_count
acc = pos_label_count / total_label_count
print ("acc=", acc)
if __name__ == '__main__':
main()
| [
"torch.no_grad",
"torch.cuda.is_available"
] | 1.7.1 | zhuxiangru/CLIP-finetune-v1 | 48bbe2c203667d94bcc82ea3fcdd2acec7ada8ab |
0.4 | import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.optim import lr_scheduler
###############################################################################
# Helper Functions
###############################################################################
def get_norm_layer(norm_type='instance'):
"""Return a normalization layer
Parameters:
norm_type (str) -- the name of the normalization layer: batch | instance | none
For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
"""
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
elif norm_type == 'none':
norm_layer = None
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
def get_scheduler(optimizer, opt):
"""Return a learning rate scheduler
Parameters:
optimizer -- the optimizer of the network
opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
For 'linear', we keep the same learning rate for the first <opt.niter> epochs
and linearly decay the rate to zero over the next <opt.niter_decay> epochs.
For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
See https://pytorch.org/docs/stable/optim.html for more details.
"""
if opt.lr_policy == 'linear':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
elif opt.lr_policy == 'plateau':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
elif opt.lr_policy == 'cosine':
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0)
else:
return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy)
return scheduler
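# Worked example (illustrative note, not part of the original code): with opt.niter=100,
# opt.niter_decay=100 and opt.epoch_count=1, the 'linear' policy keeps the multiplier at
# 1.0 through epoch 99 and then decays it linearly, reaching 0 at epoch 200, e.g.
#   epoch 100 -> 1 - 1/101   ~= 0.990
#   epoch 150 -> 1 - 51/101  ~= 0.495
#   epoch 199 -> 1 - 100/101 ~= 0.010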
def init_weights(net, init_type='normal', init_gain=0.02):
"""Initialize network weights.
Parameters:
net (network) -- network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
work better for some applications. Feel free to try yourself.
"""
def init_func(m): # define the initialization function
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, init_gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=init_gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=init_gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
init.normal_(m.weight.data, 1.0, init_gain)
init.constant_(m.bias.data, 0.0)
print('initialize network with %s' % init_type)
net.apply(init_func) # apply the initialization function <init_func>
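# Illustrative usage sketch (not part of the original module): initialize a small example
# network with Xavier weights. The layer sizes below are arbitrary and chosen only for
# demonstration.
def _example_init_weights():
    net = nn.Sequential(
        nn.Conv2d(3, 8, kernel_size=3, padding=1),
        nn.ReLU(True),
        nn.Conv2d(8, 3, kernel_size=3, padding=1),
    )
    init_weights(net, init_type='xavier', init_gain=0.02)
    return net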
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights
Parameters:
net (network) -- the network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Return an initialized network.
"""
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
net.to(gpu_ids[0])
net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs
init_weights(net, init_type, init_gain=init_gain)
return net
def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Create a generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128
norm (str) -- the name of normalization layers used in the network: batch | instance | none
use_dropout (bool) -- if use dropout layers.
init_type (str) -- the name of our initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a generator
Our current implementation provides two types of generators:
U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images)
The original U-Net paper: https://arxiv.org/abs/1505.04597
Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks)
Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style).
    The generator has been initialized by <init_net>. It uses ReLU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netG == 'resnet_9blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)
elif netG == 'resnet_6blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6)
elif netG == 'unet_128':
net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'unet_256':
net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
else:
raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
return init_net(net, init_type, init_gain, gpu_ids)
def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Create a discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the first conv layer
netD (str) -- the architecture's name: basic | n_layers | pixel
n_layers_D (int) -- the number of conv layers in the discriminator; effective when netD=='n_layers'
norm (str) -- the type of normalization layers used in the network.
init_type (str) -- the name of the initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a discriminator
Our current implementation provides three types of discriminators:
[basic]: 'PatchGAN' classifier described in the original pix2pix paper.
It can classify whether 70×70 overlapping patches are real or fake.
Such a patch-level discriminator architecture has fewer parameters
than a full-image discriminator and can work on arbitrarily-sized images
in a fully convolutional fashion.
        [n_layers]: With this mode, you can specify the number of conv layers in the discriminator
with the parameter <n_layers_D> (default=3 as used in [basic] (PatchGAN).)
[pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.
It encourages greater color diversity but has no effect on spatial statistics.
    The discriminator has been initialized by <init_net>. It uses Leaky ReLU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netD == 'basic': # default PatchGAN classifier
net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer)
elif netD == 'n_layers': # more options
net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer)
elif netD == 'pixel': # classify if each pixel is real or fake
net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
else:
raise NotImplementedError('Discriminator model name [%s] is not recognized' % net)
return init_net(net, init_type, init_gain, gpu_ids)
##############################################################################
# Classes
##############################################################################
class GANLoss(nn.Module):
"""Define different GAN objectives.
The GANLoss class abstracts away the need to create the target label tensor
that has the same size as the input.
"""
def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
""" Initialize the GANLoss class.
Parameters:
gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
target_real_label (bool) - - label for a real image
target_fake_label (bool) - - label of a fake image
Note: Do not use sigmoid as the last layer of Discriminator.
        LSGAN needs no sigmoid. Vanilla GANs will handle it with BCEWithLogitsLoss.
"""
super(GANLoss, self).__init__()
self.register_buffer('real_label', torch.tensor(target_real_label))
self.register_buffer('fake_label', torch.tensor(target_fake_label))
self.gan_mode = gan_mode
if gan_mode == 'lsgan':
self.loss = nn.MSELoss()
elif gan_mode == 'vanilla':
self.loss = nn.BCEWithLogitsLoss()
elif gan_mode in ['wgangp']:
self.loss = None
else:
raise NotImplementedError('gan mode %s not implemented' % gan_mode)
def get_target_tensor(self, prediction, target_is_real):
"""Create label tensors with the same size as the input.
Parameters:
            prediction (tensor) - - typically the prediction from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
A label tensor filled with ground truth label, and with the size of the input
"""
if target_is_real:
target_tensor = self.real_label
else:
target_tensor = self.fake_label
return target_tensor.expand_as(prediction)
def __call__(self, prediction, target_is_real):
"""Calculate loss given Discriminator's output and grount truth labels.
Parameters:
            prediction (tensor) - - typically the prediction output from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
the calculated loss.
"""
if self.gan_mode in ['lsgan', 'vanilla']:
target_tensor = self.get_target_tensor(prediction, target_is_real)
loss = self.loss(prediction, target_tensor)
elif self.gan_mode == 'wgangp':
if target_is_real:
loss = -prediction.mean()
else:
loss = prediction.mean()
return loss
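# Illustrative usage sketch (not part of the original module): an LSGAN objective applied
# to a random example discriminator output. The tensor shape mimics a PatchGAN prediction
# map and is chosen only for demonstration.
def _example_gan_loss():
    criterion = GANLoss('lsgan')
    prediction = torch.randn(4, 1, 30, 30)    # stand-in discriminator output
    loss_real = criterion(prediction, True)   # measured against an all-ones target
    loss_fake = criterion(prediction, False)  # measured against an all-zeros target
    return loss_real, loss_fake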
def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
"""Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028
Arguments:
netD (network) -- discriminator network
real_data (tensor array) -- real images
fake_data (tensor array) -- generated images from the generator
device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
type (str) -- if we mix real and fake data or not [real | fake | mixed].
        constant (float)            -- the constant used in the formula (||gradients||_2 - constant)^2
lambda_gp (float) -- weight for this loss
Returns the gradient penalty loss
"""
if lambda_gp > 0.0:
if type == 'real': # either use real images, fake images, or a linear interpolation of two.
interpolatesv = real_data
elif type == 'fake':
interpolatesv = fake_data
elif type == 'mixed':
alpha = torch.rand(real_data.shape[0], 1)
alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
alpha = alpha.to(device)
interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
else:
raise NotImplementedError('{} not implemented'.format(type))
interpolatesv.requires_grad_(True)
disc_interpolates = netD(interpolatesv)
gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,
grad_outputs=torch.ones(disc_interpolates.size()).to(device),
create_graph=True, retain_graph=True, only_inputs=True)
        gradients = gradients[0].view(real_data.size(0), -1)  # flatten the data
gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp # added eps
return gradient_penalty, gradients
else:
return 0.0, None
class ResnetGenerator(nn.Module):
"""Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style)
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):
"""Construct a Resnet-based generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetGenerator, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
return self.model(input)
class ResnetBlock(nn.Module):
"""Define a Resnet block"""
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Initialize the Resnet block
A resnet block is a conv block with skip connections
We construct a conv block with build_conv_block function,
and implement skip connections in <forward> function.
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
"""
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Construct a convolutional block.
Parameters:
dim (int) -- the number of channels in the conv layer.
padding_type (str) -- the name of padding layer: reflect | replicate | zero
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
use_bias (bool) -- if the conv layer uses bias or not
Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
"""
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
"""Forward function (with skip connections)"""
out = x + self.conv_block(x) # add skip connections
return out
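# Illustrative sketch (assumed values, not from the original file): a standalone block preserves spatial size and channel count, e.g.
#   block = ResnetBlock(256, padding_type='reflect', norm_layer=nn.BatchNorm2d, use_dropout=False, use_bias=False)
#   y = block(torch.randn(1, 256, 64, 64))  # -> (1, 256, 64, 64), computed as x + conv_block(x)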
class UnetGenerator(nn.Module):
"""Create a Unet-based generator"""
def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
"""Construct a Unet generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
            num_downs (int) -- the number of downsamplings in UNet. For example, if |num_downs| == 7,
                an image of size 128x128 will become of size 1x1 at the bottleneck
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
We construct the U-Net from the innermost layer to the outermost layer.
It is a recursive process.
"""
super(UnetGenerator, self).__init__()
# construct unet structure
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) # add the innermost layer
for i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
# gradually reduce the number of filters from ngf * 8 to ngf
unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) # add the outermost layer
def forward(self, input):
"""Standard forward"""
return self.model(input)
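# Illustrative usage sketch (assumes square inputs whose side is divisible by 2**num_downs):
#   netG = UnetGenerator(input_nc=3, output_nc=3, num_downs=8, ngf=64)
#   fake = netG(torch.randn(1, 3, 256, 256))  # 256x256 is downsampled 8 times to a 1x1 bottleneck, then upsampled back to (1, 3, 256, 256)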
class UnetSkipConnectionBlock(nn.Module):
"""Defines the Unet submodule with skip connection.
X -------------------identity----------------------
|-- downsampling -- |submodule| -- upsampling --|
"""
def __init__(self, outer_nc, inner_nc, input_nc=None,
submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
"""Construct a Unet submodule with skip connections.
Parameters:
outer_nc (int) -- the number of filters in the outer conv layer
inner_nc (int) -- the number of filters in the inner conv layer
input_nc (int) -- the number of channels in input images/features
submodule (UnetSkipConnectionBlock) -- previously defined submodules
outermost (bool) -- if this module is the outermost module
innermost (bool) -- if this module is the innermost module
norm_layer -- normalization layer
            use_dropout (bool) -- whether to use dropout layers.
"""
super(UnetSkipConnectionBlock, self).__init__()
self.outermost = outermost
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
if input_nc is None:
input_nc = outer_nc
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(inner_nc)
uprelu = nn.ReLU(True)
upnorm = norm_layer(outer_nc)
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
elif innermost:
upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv]
up = [uprelu, upconv, upnorm]
model = down + up
else:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv, downnorm]
up = [uprelu, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost:
return self.model(x)
else: # add skip connections
return torch.cat([x, self.model(x)], 1)
class NLayerDiscriminator(nn.Module):
"""Defines a PatchGAN discriminator"""
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):
"""Construct a PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
n_layers (int) -- the number of conv layers in the discriminator
norm_layer -- normalization layer
"""
super(NLayerDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func != nn.BatchNorm2d
else:
use_bias = norm_layer != nn.BatchNorm2d
kw = 4
padw = 1
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers): # gradually increase the number of filters
nf_mult_prev = nf_mult
nf_mult = min(2 ** n, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2 ** n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map
self.model = nn.Sequential(*sequence)
def forward(self, input):
"""Standard forward."""
return self.model(input)
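# Illustrative usage sketch: with the default n_layers=3 this is the classic 70x70 PatchGAN;
#   netD = NLayerDiscriminator(input_nc=3, n_layers=3)
#   pred = netD(torch.randn(1, 3, 256, 256))  # -> (1, 1, 30, 30), one real/fake logit per overlapping 70x70 patch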
class PixelDiscriminator(nn.Module):
"""Defines a 1x1 PatchGAN discriminator (pixelGAN)"""
def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
"""Construct a 1x1 PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
"""
super(PixelDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func != nn.InstanceNorm2d
else:
use_bias = norm_layer != nn.InstanceNorm2d
self.net = [
nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
norm_layer(ndf * 2),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]
self.net = nn.Sequential(*self.net)
def forward(self, input):
"""Standard forward."""
return self.net(input)
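# Illustrative usage sketch: every layer uses 1x1 convolutions, so each output logit depends on a single input pixel;
#   netD = PixelDiscriminator(input_nc=3)
#   pred = netD(torch.randn(1, 3, 256, 256))  # -> (1, 1, 256, 256), one real/fake logit per pixel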
| [
"torch.optim.lr_scheduler.StepLR",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.nn.LeakyReLU",
"torch.nn.init.kaiming_normal_",
"torch.cuda.is_available",
"torch.nn.BCEWithLogitsLoss",
"torch.nn.DataParallel",
"torch.nn.init.constant_",
"torch.nn.ConvTranspose2d",
"torch.nn.init.normal_",
"torch.tensor",
"torch.nn.ReflectionPad2d",
"torch.nn.init.orthogonal_",
"torch.nn.init.xavier_normal_",
"torch.nn.ReplicationPad2d",
"torch.nn.Sequential",
"torch.nn.Tanh",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.rand",
"torch.nn.Dropout",
"torch.nn.MSELoss",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.optim.lr_scheduler.LambdaLR"
] | 0.4.1 | spongezhang/pytorch-CycleGAN-and-pix2pix | 01875b21d537512c304f37fb0eb65fea7f57f4ba |
1.9 | import os
import torch
import numpy as np
import math
import scipy
from htvlearn.lattice import Lattice
from htvlearn.delaunay import Delaunay
from htvlearn.grid import Grid
class Hex():
"""Hexagonal lattice vectors"""
v1 = Lattice.hexagonal_matrix[:, 0].numpy()
v2 = Lattice.hexagonal_matrix[:, 1].numpy()
class BoxSpline():
"""Three-directional hexagonal box spline"""
center_points = np.array([0., 0.])
border_points = np.array([Hex.v1, Hex.v2, -Hex.v1 + Hex.v2,
-Hex.v1, -Hex.v2, -Hex.v2 + Hex.v1])
points = np.vstack((center_points, border_points, 2 * border_points))
values = np.array([math.sqrt(3) / 2,
0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0.])
htv = 12
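# Illustrative sketch (assumption, not in the original file): these vertex/value pairs can seed a
# continuous piecewise-linear function via the Delaunay helper imported above, e.g.
#   cpwl = Delaunay(points=BoxSpline.points, values=BoxSpline.values)
#   z = cpwl.evaluate(np.array([[0., 0.]]))  # evaluate the box spline at the origin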
class SimplicialSpline():
"""Simplicial spline with randomly positioned vertices"""
np.random.seed(3)
center_points = np.array([0., 0.]) + np.random.uniform(-0.2, 0.2, (2, ))
border_points = np.array([Hex.v1, Hex.v2, -Hex.v1 + Hex.v2,
-Hex.v1, -Hex.v2, -Hex.v2 + Hex.v1]) + \
np.random.uniform(-0.2, 0.2, (6, 2))
points = np.vstack((center_points, border_points, 2 * border_points))
values = np.array([math.sqrt(3) / 2,
0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0.])
class CutPyramid():
"""Pyramid with flat top"""
points = np.vstack((BoxSpline.center_points,
BoxSpline.border_points,
2 * BoxSpline.border_points,
3 * BoxSpline.border_points))
values = np.array([1., 1., 1., 1., 1., 1., 1.,
0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., ])
htv = 16 * math.sqrt(3)
class SimpleJunction():
"""A simple two-polytope junction"""
points = np.array([[0., 0.], [1., 0.], [0., 1.], [1., 1.],
[0., 3. / 4], [1., 1. / 4]])
values = np.array([0., 2. / 3, 2. / 3, 0., 1., 1.])
# gradients of each polytope
a1_affine_coeff = np.array([2. / 3, 4. / 3., 0.])
a2_affine_coeff = np.array([-2. / 3, -4. / 3., 2.])
htv = 10. / 3
def init_distorted_grid(size_=(3, 3), range_=(-1, 1)):
"""
Initialize a distorted grid.
Args:
        size_ (2-tuple): grid size.
        range_ (2-tuple):
            range of points in each dimension before distortion.
Returns:
points (np.array):
distorted grid points. size: (size_[0]*size_[1]) x 2.
"""
assert isinstance(size_, tuple)
assert len(size_) == 2
# initialize undistorted grid points (u, v)
vec1 = np.linspace(*range_, size_[0]) * 1.
vec2 = np.linspace(*range_, size_[1]) * 1.
u, v = np.meshgrid(vec1, vec2)
u = u.flatten()
v = v.flatten()
# add noise to the interior vertices of the grid
mask = np.ma.mask_or(np.abs(u) == u.max(), np.abs(v) == v.max())
points = np.hstack((u[:, np.newaxis], v[:, np.newaxis])).copy()
# the noise is scaled according to the grid spacing
noise = (np.random.rand(*points.shape) - 0.5) * (u[1] - u[0])
# don't add noise to boundary vertices
points[~mask] = points[~mask] + noise[~mask]
return points
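# Illustrative usage sketch:
#   pts = init_distorted_grid(size_=(4, 4), range_=(-1, 1))  # -> (16, 2) array; only interior vertices are jittered,
#                                                            #    boundary vertices stay on the regular grid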
class DistortedGrid:
"""Dataset with random values in a distorted random grid"""
points = init_distorted_grid(size_=(3, 3))
values = (np.random.rand(points.shape[0], ) - 0.5)
class Data():
"""Data class for algorithms"""
def __init__(self,
data_from_ckpt=None,
dataset_name=None,
num_train=None,
data_dir='./data',
valid_fraction=0.2,
test_as_valid=False,
non_uniform=False,
noise_ratio=0.,
seed=-1,
verbose=False,
**kwargs):
"""
Args:
data_from_ckpt (dict):
dictionary with 'train', 'valid' and 'test' data loaded
from a checkpoint.
dataset_name (str)
num_train (int):
number of training+valid samples. The effective number of
                training samples is rounded down to a multiple of 1000. Further, if the
                dataset has gaps, the data inside the gaps will also be removed.
            data_dir (str):
data directory (for face dataset)
valid_fraction (float [0,1]):
fraction of num_train samples that is used for validation
test_as_valid (bool):
if True, use test set in validation.
non_uniform (bool):
if True, perform non-uniform data sampling (face dataset only).
noise_ratio (float >= 0):
noise that should be applied to the samples as a fraction of
the data range.
seed (int):
seed for random generation. If negative, no seed is set.
verbose (bool):
print more info.
"""
self.data_from_ckpt = data_from_ckpt
self.dataset_name = dataset_name
self.num_train = num_train
if self.data_from_ckpt is None:
assert self.dataset_name is not None
if not self.dataset_name.startswith('pyramid'):
assert self.num_train is not None
self.data_dir = data_dir
self.valid_fraction = valid_fraction
self.test_as_valid = test_as_valid
self.non_uniform = non_uniform
self.noise_ratio = noise_ratio
self.seed = seed
self.verbose = verbose
# if not overwritten, computed in add_noise_to_values()
# from self.noise_ratio and dataset height range
self.noise_std = None
if self.seed >= 0:
# set seed
torch.manual_seed(self.seed)
torch.cuda.manual_seed_all(self.seed)
np.random.seed(self.seed)
self.train, self.valid, self.test = {}, {}, {}
self.delaunay = {} # points and values for delaunay triangulation
if self.data_from_ckpt is not None:
# load data from self.data_from_ckpt
assert 'train' in self.data_from_ckpt
assert 'valid' in self.data_from_ckpt
self.train = self.data_from_ckpt['train']
self.valid = self.data_from_ckpt['valid']
if 'delaunay' in self.data_from_ckpt:
assert 'points' in self.data_from_ckpt['delaunay']
assert 'values' in self.data_from_ckpt['delaunay']
self.delaunay['points'] = \
self.data_from_ckpt['delaunay']['points']
self.delaunay['values'] = \
self.data_from_ckpt['delaunay']['values']
self.init_data()
def init_data(self):
"""Initialize cpwl dataset, and train/test/valid sets"""
if not bool(self.delaunay):
if self.dataset_name.startswith('pyramid'):
self.delaunay['points'], self.delaunay['values'] = \
self.init_pyramid()
# training is made of all pyramid's points except apex
self.train['input'] = \
torch.from_numpy(self.delaunay['points'][:-1]).clone()
self.train['values'] = \
torch.from_numpy(self.delaunay['values'][:-1]).clone()
# force validation set to be equal to test set
self.test_as_valid = True
elif self.dataset_name.endswith('planes'):
self.delaunay['points'], self.delaunay['values'] = \
self.init_planes()
elif 'face' in self.dataset_name:
self.delaunay['points'], self.delaunay['values'] = \
self.init_face(self.data_dir,
cut=True
if 'cut' in self.dataset_name
else False)
self.cpwl = Delaunay(points=self.delaunay['points'],
values=self.delaunay['values'])
if not self.cpwl.has_rectangular_range:
if self.dataset_name.endswith('planes'):
h = (self.cpwl.tri.points[:, 0].max() -
self.cpwl.tri.points[:, 0].min()) / 400
self.test['input'] = \
Grid(x1_min=self.cpwl.tri.points[:, 0].min(),
x1_max=self.cpwl.tri.points[:, 0].max(),
x2_min=self.cpwl.tri.points[:, 1].min(),
x2_max=self.cpwl.tri.points[:, 1].max(),
h=h,
to_numpy=False,
to_float32=True).x
# discard samples outside convex set
idx = self.cpwl.tri.find_simplex(self.test['input'])
self.test['input'] = self.test['input'][idx >= 0]
else:
# generate uniformly distributed samples in cpwl convex set
# the final number of test samples will be smaller because
# samples outside lattice are discarded
nb_samples = 160000 # 400*400
self.test['input'] = \
self.generate_random_samples(nb_samples)
else:
# test set is sampled on a grid inside the convex hull of cpwl
# this gives a test grid 500 x 500 samples
self.test['input'] = self.cpwl.get_grid(h=0.0025,
to_numpy=False,
to_float32=True).x
self.test['values'] = self.cpwl.evaluate(self.test['input'])
print(f'\nnb. of test data points : {self.test["input"].size(0)}')
if (not bool(self.valid)) and (self.test_as_valid is True):
self.valid['input'] = self.test['input'].clone()
self.valid['values'] = self.test['values'].clone()
if not bool(self.train):
num_train_valid_samples = int(self.num_train)
if self.dataset_name.endswith('planes'):
# generate grid in lattice reference
x_lat = torch.empty((num_train_valid_samples, 2))
x_lat.uniform_(-0.5, 0.5)
# convert to standard coordinates
x = (Lattice.hexagonal_matrix @ x_lat.t()).t()
elif self.non_uniform is True:
hull_points = \
self.cpwl.tri.points[self.cpwl.convex_hull_points_idx]
# compute largest distance
max_dist = np.amax(np.sqrt(np.sum(hull_points ** 2, axis=1)))
# radius
r = (torch.empty((num_train_valid_samples, 1))
.uniform_(0., max_dist * 0.8))
# angle
theta = (torch.empty((num_train_valid_samples, 1))
.uniform_(0., 2 * np.pi))
# points
x = torch.cat((r * theta.cos(), r * theta.sin()), dim=1)
# Only keep points inside cpwl convex hull
x_simplices_idx = self.cpwl.tri.find_simplex(x)
x = x[x_simplices_idx >= 0]
else:
# generate num_train_valid_samples uniformly distributed
# in cpwl convex set
x = self.generate_random_samples(num_train_valid_samples)
# training / validation split indices
if not self.test_as_valid:
split_idx = int((1 - self.valid_fraction) *
x.size(0))
else:
# full training set, validation set = test set
split_idx = x.size(0)
self.train['input'] = x[0:split_idx]
self.train['values'] = self.cpwl.evaluate(self.train['input'])
if self.dataset_name.endswith('gaps'):
# [(gap_x_min, gap_x_max)...]
gap_x_range = [[0.108, 0.234], [-0.07, 0.226],
[-0.234, -0.108]]
# [(gap_y_min, gap_y_max)...]
gap_y_range = [[-0.21, 0.07], [0.19, 0.311], [-0.21, 0.063]]
# remove data inside gaps
for i in range(len(gap_x_range)):
gap_mask = (
(self.train['input'][:, 0] >= gap_x_range[i][0]) *
(self.train['input'][:, 0] <= gap_x_range[i][1]) *
(self.train['input'][:, 1] >= gap_y_range[i][0]) *
(self.train['input'][:, 1] <= gap_y_range[i][1]))
self.train['input'] = self.train['input'][~gap_mask]
self.train['values'] = self.train['values'][~gap_mask]
if not np.allclose(self.noise_ratio, 0.):
# add noise to training data
self.train['values'] = \
self.add_noise_to_values(self.train['values'])
if self.train['input'].size(0) >= 3000:
# effective number of samples (rounded to 1000)
num = int(np.floor(self.train['input'].size(0) / 1000.) * 1000)
idx = torch.randperm(self.train['input'].size(0))[:num]
self.train['input'] = self.train['input'][idx]
self.train['values'] = self.train['values'][idx]
print('nb. of training data points : '
f'{self.train["input"].size(0)}')
if not bool(self.valid):
self.valid['input'] = x[(split_idx + 1)::]
self.valid['values'] = \
self.cpwl.evaluate(self.valid['input'])
@staticmethod
def add_lattice_vertices(points, values, eps=0.):
"""Add lattice vertices (up to eps distance away)
Args:
points (torch.Tensor or np.ndarray): size (m, 2)
values (torch.Tensor or np.ndarray): size (m,)
eps (float): buffer distance from boundaries of lattice.
"""
nparray = False
if isinstance(points, np.ndarray):
nparray = True
# convert to torch
points = torch.from_numpy(points)
values = torch.from_numpy(values)
# add lattice corners
br = Lattice.bottom_right_std
ur = Lattice.upper_right_std
a, b = eps * np.sqrt(3) / 2., eps * .5
lat_points = \
torch.tensor([[-ur[0] + a, -ur[1] + b],
[br[0] - b, br[1] + a],
[-br[0] + b, -br[1] - a],
[ur[0] - a, ur[1] - b]])
points = torch.cat((points, lat_points), dim=0)
values = torch.cat((values, torch.zeros(4)))
if nparray is True:
# convert to numpy
points = points.numpy()
values = values.numpy()
return points, values
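    # Illustrative usage sketch (assumption): append the four lattice corners, with value 0, to an
    # existing point set so the triangulation can cover the whole lattice, e.g.
    #   points, values = Data.add_lattice_vertices(points, values, eps=1e-3)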
def generate_random_samples(self, num_samples):
"""
Generate uniformly distributed data inside convex set.
Works by generating num_samples points and then rejecting the
ones outside the convex set.
Args:
num_samples (int) (before possible rejection)
Returns:
x (torch.tensor)
"""
x = torch.empty((num_samples, 2))
x[:, 0].uniform_(self.cpwl.tri.points[:, 0].min(),
self.cpwl.tri.points[:, 0].max())
x[:, 1].uniform_(self.cpwl.tri.points[:, 1].min(),
self.cpwl.tri.points[:, 1].max())
# reject samples outside convex set
idx = self.cpwl.tri.find_simplex(x)
x = x[idx >= 0]
return x
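    # Illustrative usage sketch: request more samples than needed, since points outside the convex set are rejected,
    #   x = self.generate_random_samples(160000)  # roughly a 400x400 budget before rejection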
def add_noise_to_values(self, values):
"""
Add gaussian noise to values.
if self.noise_std exists, it is used as the noise standard deviation,
otherwise noise_std is computed from self.noise_ratio and the data
height range.
Args:
values (torch.tensor):
values to add noise to.
Returns the noisy values.
"""
noise_std = self.noise_std
if noise_std is None:
noise_std = self.noise_ratio * (values.max() - values.min())
if self.verbose:
print('Adding noise of standard deviation '
'sigma = {:.2E}'.format(noise_std))
noise = torch.empty_like(values).normal_(std=noise_std)
return values + noise
@staticmethod
def init_pyramid():
"""
Initialize the pyramid dataset.
Returns:
points (np.array): size (M, 2).
values (np.array): size (M,)
"""
# points in lattice coordinates
h = 0.1
points = torch.tensor([[2 * h, 0.], [0., 2 * h],
[2 * h, -2 * h], [0., -2 * h],
[-2 * h, 0.], [-2 * h, 2 * h],
[h, 0.], [0., h],
[h, -h], [0., -h],
[-h, 0.], [-h, h],
[0., 0.]]) # last element -> apex
values = torch.tensor([.0, .0, .0, .0, .0, .0,
.1, .1, .1, .1, .1, .1,
.2])
# convert to standard coordinates
points = (Lattice.hexagonal_matrix @ points.t()).t()
return points.numpy(), values.numpy()
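    # Illustrative sketch: the returned arrays hold 13 vertices (two hexagonal rings plus the apex at the origin),
    #   points, values = Data.init_pyramid()  # points.shape == (13, 2), values.shape == (13,)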
@classmethod
def init_zero_boundary_planes(cls):
"""
Initialize the planes dataset with zero boundaries.
Returns:
points (torch.tensor): size (M, 2).
values (torch.tensor): size (M,)
"""
# fit planes function in the lattice
pad = 0.08
x_min, _, x_max, _ = cls.get_data_boundaries(hw_ratio=0.01, pad=pad)
_, y_min, _, y_max = cls.get_data_boundaries(hw_ratio=100, pad=pad)
dx = (x_max - x_min) / 100 # delta x step
dy = (y_max - y_min) / 100 # delta y step
# control points with values - (x1, x2, val)
vert = \
torch.tensor([[x_min + 30 * dx, y_min + 35 * dy, dx * 20], # 0
[x_max - 40 * dx, y_min + 30 * dy, dx * 20], # 1
[x_max - 35 * dx, y_max - 30 * dy, dx * 20], # 2
[x_min + 40 * dx, y_max - 30 * dy, dx * 20], # 3
[x_max - 25 * dx, y_min + 5 * dy, 0.], # 4
[x_min + 25 * dx, y_max - 5 * dy, 0.]]) # 5
# auxiliary triangulation of the function
# size (num_simplices, vertices)
simplices = torch.tensor([[0, 1, 3],
[1, 2, 3],
[4, 1, 0],
[0, 3, 5],
[4, 2, 1],
[3, 2, 5]])
# check values of vertices so that there is a seamless plane junction
x_v6 = cls.get_zero_loc(vert, simplices, 2, 3)
x_v7 = cls.get_zero_loc(vert, simplices, 4, 5)
br = Lattice.bottom_right_std
ur = Lattice.upper_right_std
# add x_v6, x_v7, and lattice corners
new_vert = torch.tensor([[x_v6[0], x_v6[1], 0.], # 6
[x_v7[0], x_v7[1], 0.], # 7
[-ur[0], -ur[1], 0.], # 8
[br[0], br[1], 0.], # 9
[-br[0], -br[1], 0.], # 10
[ur[0], ur[1], 0.]]) # 11
vert = torch.cat((vert, new_vert), dim=0)
points, values = vert[:, 0:2], vert[:, 2]
return points, values
@staticmethod
def add_linear_func(points, values):
"""
Add a linear term to the dataset.
Args:
points (torch.tensor): size (M, 2).
values (torch.tensor): size (M,)
Returns:
values (torch.tensor): size (M,).
"""
# add linear term to vertices
a = torch.tensor([0.1, 0.05])
b = torch.tensor([-0.05])
values += (points * a.unsqueeze(0)).sum(1) + b
return values
def init_planes(self):
"""
Initialize the planes dataset. Set self.noise_std.
Returns:
points (torch.tensor): size (M, 2).
values (torch.tensor): size (M,)
"""
# initialize planes dataset with zero boundaries
points, values = self.init_zero_boundary_planes()
# overwrite noise standard deviation
self.noise_std = (self.noise_ratio * values.max())
# add linear function to dataset
values = self.add_linear_func(points, values)
# convert to numpy
points, values = points.numpy(), values.numpy()
return points, values
@staticmethod
def get_zero_loc(vert, simplices, idx1, idx2):
"""
Get zero locations of vertices for a seamless junction of the planes.
Args:
vert (np.array):
size: (M, 3) (points in the first two columns,
values in the third)
simplices (np.array):
indexes of vertices for each simplex (row). size: (P, 3).
idx1, idx2 (int>=0):
indices of simplices to join.
Returns:
x (torch.tensor): size (2,)
"""
# size (2, 3, 3)
idx_vec = [idx1, idx2]
simplices_vert = \
torch.cat(tuple(vert[simplices[i]].unsqueeze(0)
for i in idx_vec), dim=0)
plane_coeff = Lattice.solve_method(simplices_vert)
affine_coeff = Lattice.get_affine_coeff_from_plane_coeff(plane_coeff)
assert affine_coeff.size() == (2, 3)
B = -affine_coeff[:, -1:]
A = affine_coeff[:, 0:2]
x = torch.linalg.solve(A, B)
return x.squeeze(-1)
@staticmethod
def read_face(data_dir, cut_eps=0.6):
"""
Read the 3D face dataset and construct a function from it by
cutting and eliminating duplicates.
Args:
cut_eps (float in [0,1]):
what height to cut face relative to its maximum height.
Returns:
cleaned_vert (np.array):
with vertices below cut_eps and duplicates removed and
zero mean.
size: (M, 3) (points in the first two columns,
values in the third)
"""
obj_file = os.path.join(data_dir, 'obj_free_male_head.obj')
V = []
with open(obj_file, "r") as file1:
for line in file1.readlines():
f_list = [i for i in line.split(" ") if i.strip()]
if len(f_list) == 0:
continue
if f_list[0] != 'v':
continue
V += [float(i) for i in f_list[1::]]
# vertices
vert = np.array(V).reshape(-1, 3)
# sort vertices by z coordinates in descending direction
sort_vert = vert[vert[:, 2].argsort()][::-1]
# get unique_idx of first occurences (largest height)
_, unique_dx = np.unique(sort_vert[:, 0:2], return_index=True, axis=0)
unique_sort_vert = sort_vert[unique_dx]
# eliminate vertices whose height is below cutoff
min_height = unique_sort_vert[:, 2].min()
max_height = unique_sort_vert[:, 2].max()
cutoff_val = min_height + (max_height - min_height) * cut_eps
cutoff_mask = np.where(unique_sort_vert[:, 2] > cutoff_val)[0]
cleaned_vert = unique_sort_vert[cutoff_mask]
cleaned_vert[:, 2] = cleaned_vert[:, 2] - \
cutoff_val # shift z.min() to z = 0
x_mean = cleaned_vert[:, 0].min() / 2. + cleaned_vert[:, 0].max() / 2.
y_mean = cleaned_vert[:, 1].min() / 2. + cleaned_vert[:, 1].max() / 2.
cleaned_vert[:, 0] = cleaned_vert[:, 0] - x_mean # shift x around 0
        cleaned_vert[:, 1] = cleaned_vert[:, 1] - y_mean  # shift y around 0
return cleaned_vert
@classmethod
def init_face(cls, data_dir, cut=False):
"""
Initialize the face dataset.
Args:
cut (bool):
if True, use only a smaller section of the face.
Otherwise, use full face with zero boundaries.
Returns:
points (torch.tensor): size (M, 2).
values (torch.tensor): size (M,)
"""
vert = cls.read_face(data_dir)
# normalize face to fit in [-0.8, 0.8]^2 square
max_ = max(np.abs(vert[:, 0]).max(), np.abs(vert[:, 1]).max())
vert = vert / max_ * 0.8
if cut is True:
# cut a smaller portion of the face
cpwl_aux = Delaunay(points=vert[:, 0:2].copy(),
values=vert[:, 2].copy())
x_min, x_max = -0.324, 0.324
y_min, y_max = -0.45, 0.419
mask = (vert[:, 0] > x_min) * (vert[:, 0] < x_max) * \
(vert[:, 1] > y_min) * (vert[:, 1] < y_max)
vert = vert[mask]
# add extreme points of the convex hull to vertices
hull_points = np.array([[x_min, y_min], [x_max, y_min],
[x_max, y_max], [x_min, y_max]])
hull_values = cpwl_aux.evaluate(hull_points)
new_vertices = np.concatenate(
(hull_points, hull_values[:, np.newaxis]), axis=1)
vert = np.concatenate((vert, new_vertices), axis=0)
else:
points = vert[:, 0:2]
hull = scipy.spatial.ConvexHull(points)
hull_points = points[hull.vertices]
# add points along the convex hull
for i in range(hull_points.shape[0]):
frac = np.linspace(0.01, 0.99, num=99)[:, np.newaxis]
next_vert = i + 1 if i != hull_points.shape[0] - 1 else 0
new_points = hull_points[next_vert][np.newaxis, :] * frac + \
hull_points[i][np.newaxis, :] * (1 - frac)
if cut is True:
# evaluate on convex hull of face
new_values = cpwl_aux.evaluate(new_points)
else:
# add zeros around face (to its convex hull contour)
new_values = np.zeros(new_points.shape[0])
new_vertices = np.concatenate(
(new_points, new_values[:, np.newaxis]), axis=1)
vert = np.concatenate((vert, new_vertices), axis=0)
if cut is False:
# create grid of points with zero value around face
h = 0.01
x_r = vert[:, 0].max() * 10. / 8.
y_r = vert[:, 1].max() * 9.5 / 8.
fine_grid = Grid(x1_min=-x_r,
x1_max=x_r + h,
x2_min=-y_r,
x2_max=y_r + h,
h=h,
to_float32=True).x
# only retain points outside face convex hull
aux_delaunay = scipy.spatial.Delaunay(points)
fine_grid = fine_grid[aux_delaunay.find_simplex(fine_grid) < 0]
# add zeros around face
new_vertices = np.concatenate(
(fine_grid, np.zeros((fine_grid.shape[0], 1))), axis=1)
vert = np.concatenate((vert, new_vertices), axis=0)
vert = cls.fit_in_lattice(vert)
points, values = vert[:, 0:2], vert[:, 2]
return points, values
@classmethod
def fit_in_lattice(cls, vert):
"""
Fit points in lattice.
Args:
vert (np.array):
size: (M, 3) (points in the first two columns,
values in the third)
Returns:
vert (np.array):
scaled vertices that fit in lattice.
"""
# normalize face to fit in lattice
hw_ratio = (vert[:, 1].max() - vert[:, 1].min()) / \
(vert[:, 0].max() - vert[:, 0].min())
_, _, x_max, y_max = cls.get_data_boundaries(hw_ratio=hw_ratio,
pad=0.03)
# recenter data
x_mean = (vert[:, 0].max() + vert[:, 0].min()) / 2
y_mean = (vert[:, 1].max() + vert[:, 1].min()) / 2
vert[:, 0] = vert[:, 0] - x_mean
vert[:, 1] = vert[:, 1] - y_mean
# x,y scaling factors
# vert[i,0] should be within (-x_max, x_max)
# vert[i,1] should be within (-y_max, y_max)
x_norm = x_max / vert[:, 0].max()
y_norm = y_max / vert[:, 1].max()
if x_norm < y_norm:
vert = vert * x_norm
else:
vert = vert * y_norm
return vert
@staticmethod
def get_data_boundaries(hw_ratio=math.sqrt(3), pad=0.1):
"""
        Get the data boundaries that allow fitting the data in a centered
        rectangular region of the lattice with a specified height/width ratio,
so as to maximize occupied space within the interior lattice.
Pad a given distance from the limits if pad > 0.
Takes into account geometry of hexagonal lattice:
if hw_ratio > math.sqrt(3), the data touches the upper and bottom
        interior border; otherwise, it touches the left and right borders.
Args:
hw_ratio (float>0):
height/width ratio of rectangular region.
pad (float>=0):
distance to pad from the limits of the region.
Returns:
            4-tuple (x_min, y_min, x_max, y_max): data boundaries
"""
# requires that lattice is hexagonal and lsize*h = 1 (enforced)
bottom_right_std = Lattice.bottom_right_std
if hw_ratio > math.sqrt(3): # from geometry maximize space usage
y_min = bottom_right_std[1]
x_min = y_min * (1. / hw_ratio)
else:
a = (bottom_right_std[0] * 2) / (1 + hw_ratio * math.sqrt(3) / 3)
x_min = -a
y_min = x_min * hw_ratio
x_min, y_min = x_min + pad, y_min + pad
x_max, y_max = -x_min, -y_min
return x_min.item(), y_min.item(), x_max.item(), y_max.item()
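    # Illustrative usage sketch (mirrors the calls in init_zero_boundary_planes and fit_in_lattice):
    #   x_min, y_min, x_max, y_max = Data.get_data_boundaries(hw_ratio=1.0, pad=0.03)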
| [
"torch.cat",
"torch.manual_seed",
"torch.tensor",
"torch.empty",
"torch.zeros",
"torch.cuda.manual_seed_all",
"torch.from_numpy",
"torch.linalg.solve",
"torch.empty_like"
] | 1.9.0 | joaquimcampos/HTV-Learn | dfc6c3b022ba010e18316d941af44d87c98cfa98 |
1.6 | import torch
import torch.nn.functional as F
from rl_algorithms.common.abstract.learner import TensorTuple
import rl_algorithms.common.helper_functions as common_utils
from rl_algorithms.registry import LEARNERS
from rl_algorithms.sac.learner import SACLearner
@LEARNERS.register_module
class BCSACLearner(SACLearner):
"""Learner for BCSAC Agent.
Attributes:
hyper_params (ConfigDict): hyper-parameters
log_cfg (ConfigDict): configuration for saving log and checkpoint
"""
def update_model(
self, experience: TensorTuple, demos: TensorTuple
) -> TensorTuple: # type: ignore
"""Train the model after each episode."""
self.update_step += 1
states, actions, rewards, next_states, dones = experience
demo_states, demo_actions, _, _, _ = demos
new_actions, log_prob, pre_tanh_value, mu, std = self.actor(states)
pred_actions, _, _, _, _ = self.actor(demo_states)
# train alpha
if self.hyper_params.auto_entropy_tuning:
alpha_loss = (
-self.log_alpha * (log_prob + self.target_entropy).detach()
).mean()
self.alpha_optim.zero_grad()
alpha_loss.backward()
self.alpha_optim.step()
alpha = self.log_alpha.exp()
else:
alpha_loss = torch.zeros(1)
alpha = self.hyper_params.w_entropy
# Q function loss
masks = 1 - dones
states_actions = torch.cat((states, actions), dim=-1)
q_1_pred = self.qf_1(states_actions)
q_2_pred = self.qf_2(states_actions)
v_target = self.vf_target(next_states)
q_target = rewards + self.hyper_params.gamma * v_target * masks
qf_1_loss = F.mse_loss(q_1_pred, q_target.detach())
qf_2_loss = F.mse_loss(q_2_pred, q_target.detach())
# V function loss
states_actions = torch.cat((states, new_actions), dim=-1)
v_pred = self.vf(states)
q_pred = torch.min(self.qf_1(states_actions), self.qf_2(states_actions))
v_target = q_pred - alpha * log_prob
vf_loss = F.mse_loss(v_pred, v_target.detach())
# update actor
actor_loss = torch.zeros(1)
n_qf_mask = 0
if self.update_step % self.hyper_params.policy_update_freq == 0:
# bc loss
qf_mask = torch.gt(
self.qf_1(torch.cat((demo_states, demo_actions), dim=-1)),
self.qf_1(torch.cat((demo_states, pred_actions), dim=-1)),
).to(self.device)
qf_mask = qf_mask.float()
n_qf_mask = int(qf_mask.sum().item())
if n_qf_mask == 0:
bc_loss = torch.zeros(1, device=self.device)
else:
bc_loss = (
torch.mul(pred_actions, qf_mask) - torch.mul(demo_actions, qf_mask)
).pow(2).sum() / n_qf_mask
# actor loss
advantage = q_pred - v_pred.detach()
actor_loss = (alpha * log_prob - advantage).mean()
actor_loss = (
self.hyper_params.lambda1 * actor_loss
+ self.hyper_params.lambda2 * bc_loss
)
# regularization
mean_reg, std_reg = (
self.hyper_params.w_mean_reg * mu.pow(2).mean(),
self.hyper_params.w_std_reg * std.pow(2).mean(),
)
pre_activation_reg = self.hyper_params.w_pre_activation_reg * (
pre_tanh_value.pow(2).sum(dim=-1).mean()
)
actor_reg = mean_reg + std_reg + pre_activation_reg
# actor loss + regularization
actor_loss += actor_reg
# train actor
self.actor_optim.zero_grad()
actor_loss.backward()
self.actor_optim.step()
# update target networks
common_utils.soft_update(self.vf, self.vf_target, self.hyper_params.tau)
# train Q functions
self.qf_1_optim.zero_grad()
qf_1_loss.backward()
self.qf_1_optim.step()
self.qf_2_optim.zero_grad()
qf_2_loss.backward()
self.qf_2_optim.step()
# train V function
self.vf_optim.zero_grad()
vf_loss.backward()
self.vf_optim.step()
return (
actor_loss.item(),
qf_1_loss.item(),
qf_2_loss.item(),
vf_loss.item(),
alpha_loss.item(),
n_qf_mask,
)
| [
"torch.zeros",
"torch.cat",
"torch.mul"
] | 1.6.0 | krishanrana/rl_algorithms | c12fe447a70f2f99f37f6c1157907755d38fde81 |
1.8 | # PyTorch utils
import logging
import math
import os
import subprocess
import time
from contextlib import contextmanager
from copy import deepcopy
from pathlib import Path
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torchvision
try:
import thop # for FLOPS computation
except ImportError:
thop = None
logger = logging.getLogger(__name__)
@contextmanager
def torch_distributed_zero_first(local_rank: int):
"""
Decorator to make all processes in distributed training wait for each local_master to do something.
"""
if local_rank not in [-1, 0]:
torch.distributed.barrier()
yield
if local_rank == 0:
torch.distributed.barrier()
def init_torch_seeds(seed=0):
# Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html
torch.manual_seed(seed)
if seed == 0: # slower, more reproducible
cudnn.benchmark, cudnn.deterministic = False, True
else: # faster, less reproducible
cudnn.benchmark, cudnn.deterministic = True, False
def git_describe():
# return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe
if Path('.git').exists():
return subprocess.check_output('git describe --tags --long --always', shell=True).decode('utf-8')[:-1]
else:
return ''
def select_device(device='', batch_size=None):
# device = 'cpu' or '0' or '0,1,2,3'
s = f'YOLOv5 {git_describe()} torch {torch.__version__} ' # string
cpu = device.lower() == 'cpu'
if cpu:
os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False
elif device: # non-cpu device requested
os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable
assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability
cuda = not cpu and torch.cuda.is_available()
if cuda:
n = torch.cuda.device_count()
if n > 1 and batch_size: # check that batch_size is compatible with device_count
assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'
space = ' ' * len(s)
for i, d in enumerate(device.split(',') if device else range(n)):
p = torch.cuda.get_device_properties(i)
s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n" # bytes to MB
else:
s += 'CPU\n'
#logger.info(s) # skip a line
return torch.device('cuda:0' if cuda else 'cpu')
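# Illustrative usage sketch:
#   device = select_device('0', batch_size=16)  # -> torch.device('cuda:0') if CUDA is available
#   device = select_device('cpu')               # force CPU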
def time_synchronized():
# pytorch-accurate time
if torch.cuda.is_available():
torch.cuda.synchronize()
return time.time()
def profile(x, ops, n=100, device=None):
# profile a pytorch module or list of modules. Example usage:
# x = torch.randn(16, 3, 640, 640) # input
# m1 = lambda x: x * torch.sigmoid(x)
# m2 = nn.SiLU()
# profile(x, [m1, m2], n=100) # profile speed over 100 iterations
device = device or torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
x = x.to(device)
x.requires_grad = True
print(torch.__version__, device.type, torch.cuda.get_device_properties(0) if device.type == 'cuda' else '')
print(f"\n{'Params':>12s}{'GFLOPS':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}")
for m in ops if isinstance(ops, list) else [ops]:
m = m.to(device) if hasattr(m, 'to') else m # device
m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m # type
dtf, dtb, t = 0., 0., [0., 0., 0.] # dt forward, backward
try:
flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPS
except:
flops = 0
for _ in range(n):
t[0] = time_synchronized()
y = m(x)
t[1] = time_synchronized()
try:
_ = y.sum().backward()
t[2] = time_synchronized()
except: # no backward method
t[2] = float('nan')
dtf += (t[1] - t[0]) * 1000 / n # ms per op forward
dtb += (t[2] - t[1]) * 1000 / n # ms per op backward
s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list'
s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list'
p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0 # parameters
print(f'{p:12.4g}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}')
def is_parallel(model):
return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
def intersect_dicts(da, db, exclude=()):
# Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values
return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape}
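# Illustrative usage sketch (hypothetical checkpoint loading; the key names are examples only):
#   csd = intersect_dicts(ckpt['model'].float().state_dict(), model.state_dict(), exclude=['anchor'])
#   model.load_state_dict(csd, strict=False)  # keep only keys whose names and shapes match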
def initialize_weights(model):
for m in model.modules():
t = type(m)
if t is nn.Conv2d:
pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif t is nn.BatchNorm2d:
m.eps = 1e-3
m.momentum = 0.03
elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]:
m.inplace = True
def find_modules(model, mclass=nn.Conv2d):
# Finds layer indices matching module class 'mclass'
return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)]
def sparsity(model):
# Return global model sparsity
a, b = 0., 0.
for p in model.parameters():
a += p.numel()
b += (p == 0).sum()
return b / a
def prune(model, amount=0.3):
# Prune model to requested global sparsity
import torch.nn.utils.prune as prune
print('Pruning model... ', end='')
for name, m in model.named_modules():
if isinstance(m, nn.Conv2d):
prune.l1_unstructured(m, name='weight', amount=amount) # prune
prune.remove(m, 'weight') # make permanent
print(' %.3g global sparsity' % sparsity(model))
def fuse_conv_and_bn(conv, bn):
# Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/
fusedconv = nn.Conv2d(conv.in_channels,
conv.out_channels,
kernel_size=conv.kernel_size,
stride=conv.stride,
padding=conv.padding,
groups=conv.groups,
bias=True).requires_grad_(False).to(conv.weight.device)
# prepare filters
w_conv = conv.weight.clone().view(conv.out_channels, -1)
w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size()))
# prepare spatial bias
b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)
return fusedconv
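# The fusion above folds BatchNorm into the preceding convolution (valid for inference only):
#   w_fused = diag(gamma / sqrt(running_var + eps)) @ w_conv
#   b_fused = diag(gamma / sqrt(running_var + eps)) @ b_conv + (beta - gamma * running_mean / sqrt(running_var + eps))
# Illustrative call (the module names are hypothetical): fused = fuse_conv_and_bn(m.conv, m.bn)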
def model_info(model, verbose=False, img_size=640):
# Model information. img_size may be int or list, i.e. img_size=640 or img_size=[640, 320]
n_p = sum(x.numel() for x in model.parameters()) # number parameters
n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients
if verbose:
print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
for i, (name, p) in enumerate(model.named_parameters()):
name = name.replace('module_list.', '')
print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
(i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
try: # FLOPS
from thop import profile
stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32
img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device) # input
flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPS
img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float
fs = ', %.1f GFLOPS' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPS
except (ImportError, Exception):
fs = ''
#logger.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}")
def load_classifier(name='resnet101', n=2):
# Loads a pretrained model reshaped to n-class output
model = torchvision.models.__dict__[name](pretrained=True)
# ResNet model properties
# input_size = [3, 224, 224]
# input_space = 'RGB'
# input_range = [0, 1]
# mean = [0.485, 0.456, 0.406]
# std = [0.229, 0.224, 0.225]
# Reshape output to n classes
filters = model.fc.weight.shape[1]
model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True)
model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True)
model.fc.out_features = n
return model
def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416)
# scales img(bs,3,y,x) by ratio constrained to gs-multiple
if ratio == 1.0:
return img
else:
h, w = img.shape[2:]
s = (int(h * ratio), int(w * ratio)) # new size
img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize
if not same_shape: # pad/crop img
h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)]
return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean
def copy_attr(a, b, include=(), exclude=()):
# Copy attributes from b to a, options to only include [...] and to exclude [...]
for k, v in b.__dict__.items():
if (len(include) and k not in include) or k.startswith('_') or k in exclude:
continue
else:
setattr(a, k, v)
class ModelEMA:
""" Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models
Keep a moving average of everything in the model state_dict (parameters and buffers).
This is intended to allow functionality like
https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
A smoothed version of the weights is necessary for some training schemes to perform well.
    This class is sensitive to where it is initialized in the sequence of model init,
GPU assignment and distributed training wrappers.
"""
def __init__(self, model, decay=0.9999, updates=0):
# Create EMA
self.ema = deepcopy(model.module if is_parallel(model) else model).eval() # FP32 EMA
# if next(model.parameters()).device.type != 'cpu':
# self.ema.half() # FP16 EMA
self.updates = updates # number of EMA updates
self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) # decay exponential ramp (to help early epochs)
for p in self.ema.parameters():
p.requires_grad_(False)
def update(self, model):
# Update EMA parameters
with torch.no_grad():
self.updates += 1
d = self.decay(self.updates)
msd = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict
for k, v in self.ema.state_dict().items():
if v.dtype.is_floating_point:
v *= d
v += (1. - d) * msd[k].detach()
def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):
# Update EMA attributes
copy_attr(self.ema, model, include, exclude)
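    # Illustrative usage sketch (the training-loop integration is an assumption, not part of this file):
    #   ema = ModelEMA(model)
    #   for batch in loader:
    #       ...optimizer step on model...
    #       ema.update(model)      # blend the EMA weights toward the current model
    #   ema.update_attr(model)     # copy non-parameter attributes before saving/evaluating ema.ema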
| [
"torch.zeros",
"torch.device",
"torch.nn.utils.prune.l1_unstructured",
"torch.sqrt",
"torch.cuda.synchronize",
"torch.nn.functional.interpolate",
"torch.cuda.get_device_properties",
"torch.no_grad",
"torch.nn.utils.prune.remove",
"torch.cuda.device_count",
"torch.manual_seed",
"torch.mm",
"torch.nn.Conv2d",
"torch.cuda.is_available",
"torch.nn.functional.pad",
"torch.distributed.barrier"
] | 1.8.1 | AndrewLaird/ChessTutorModels | c4fd960417d5b9918e430d040deb89fed3f4b73b |
1.7 | from torch.nn import Conv2d, MaxPool2d
from torch import no_grad, round
from torch.nn.functional import interpolate
from itertools import chain
class PRISM:
_excitations = []
_hook_handlers = []
_is_orig_image = True
def _excitation_hook(module, input, output):
# for better output sharpness we collect input images
if PRISM._is_orig_image:
PRISM._excitations.append(input[0])
PRISM._is_orig_image = False
PRISM._excitations.append(output)
def register_hooks(model, recursive=False):
if not recursive and PRISM._hook_handlers:
print("Hooks can only be registered to one model at once. Please use: `prune_old_hooks()`")
return
for i, layer in enumerate(model.children()):
if list(layer.children()):
PRISM.register_hooks(layer, recursive=True)
elif isinstance(layer, MaxPool2d):
PRISM._hook_handlers.append(
layer.register_forward_hook(PRISM._excitation_hook)
)
elif isinstance(layer, Conv2d) and layer.stride > (1, 1):
PRISM._hook_handlers.append(
layer.register_forward_hook(PRISM._excitation_hook)
)
def prune_old_hooks(model):
if not PRISM._hook_handlers:
print("No hooks to remove")
for hook in PRISM._hook_handlers:
hook.remove()
PRISM._hook_handlers = []
###############################################
def _svd(final_excitation):
final_layer_input = final_excitation.permute(0, 2, 3, 1).reshape(
-1, final_excitation.shape[1]
)
normalized_final_layer_input = final_layer_input - final_layer_input.mean(0)
# normalized_final_layer_input = final_layer_input
u, s, v = normalized_final_layer_input.svd(compute_uv=True)
raw_features = u[:, :3].matmul(s[:3].diag())
return raw_features.view(
final_excitation.shape[0],
final_excitation.shape[2],
final_excitation.shape[3],
3
).permute(0, 3, 1, 2)
def _quantize(maps):
# h,w,c
maps = PRISM._normalize_to_rgb(maps).permute(0, 2, 3, 1)
quant_maps = 0.5 * round(maps / 0.5)
image_colors = []
for img in quant_maps:
colors_set = set()
for row in img:
for pixel in row:
colors_set.add(pixel.numpy().tostring())
image_colors.append(colors_set)
return quant_maps, image_colors
def _intersection(maps):
quant_maps, image_colors = PRISM._quantize(maps)
common_colors = set.intersection(*image_colors)
for img in quant_maps:
for row in img:
for pixel in row:
if pixel.numpy().tostring() not in common_colors:
pixel *= 0.0
return quant_maps.permute(0, 3, 1, 2)
def _difference(maps):
quant_maps, image_colors = PRISM._quantize(maps)
all_colors= set(chain.from_iterable(image_colors))
exclusive_colors = all_colors - set.intersection(*image_colors)
for img in quant_maps:
for row in img:
for pixel in row:
if pixel.numpy().tostring() not in exclusive_colors:
pixel *= 0.0
return quant_maps.permute(0, 3, 1, 2)
def _upsampling(extracted_features, pre_excitations):
for e in pre_excitations[::-1]:
extracted_features = interpolate(
extracted_features,
size=(e.shape[2], e.shape[3]),
mode="bilinear",
align_corners=False,
)
extracted_features *= e.mean(dim=1, keepdim=True)
return extracted_features
def _normalize_to_rgb(features):
scaled_features = (features - features.mean()) / features.std()
scaled_features = scaled_features.clip(-1, 1)
scaled_features = (scaled_features - scaled_features.min()) / (
scaled_features.max() - scaled_features.min()
)
return scaled_features
def get_maps(grad_extrap=True, inclusive=False, exclusive=False):
if not PRISM._excitations:
print("No data in hooks. Have You used `register_hooks(model)` method?")
return
# [print(e.shape) for e in PRISM._excitations]
with no_grad():
extracted_features = PRISM._svd(PRISM._excitations.pop())
if inclusive and exclusive:
rgb_features_map, _ = PRISM._quantize(extracted_features)
rgb_features_map = rgb_features_map.permute(0, 3, 1, 2)
elif exclusive:
rgb_features_map = PRISM._difference(extracted_features)
elif inclusive:
rgb_features_map = PRISM._intersection(extracted_features)
else:
rgb_features_map = extracted_features
if grad_extrap:
rgb_features_map = PRISM._upsampling(
rgb_features_map, PRISM._excitations
)
rgb_features_map = PRISM._normalize_to_rgb(rgb_features_map)
# prune old PRISM._excitations
PRISM.reset_excitations()
return rgb_features_map
def reset_excitations():
PRISM._is_orig_image = True
PRISM._excitations = []
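    # Illustrative usage sketch (assumes a CNN such as a torchvision model and a batch of preprocessed images):
    #   PRISM.register_hooks(model)
    #   _ = model(images)             # the forward pass fills the hooked excitations
    #   maps = PRISM.get_maps()       # -> (B, 3, H, W) RGB feature maps at the input resolution
    #   PRISM.prune_old_hooks(model)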
| [
"torch.round",
"torch.nn.functional.interpolate",
"torch.no_grad"
] | 1.7.1 | szandala/PRISM | 7f40e9ebdb5e53c1343cd3d358933861b24573da |
1.10 | import torch.nn as nn
# Fully connected neural network with one hidden layer
class NeuralNet(nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNet, self).__init__()
self.fc1 = nn.Linear(input_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.fc3 = nn.Linear(hidden_size, num_classes)
self.relu = nn.ReLU()
def forward(self, x):
out = self.fc1(x)
out = self.relu(out)
out = self.fc2(out)
out = self.relu(out)
out = self.fc3(out)
return out
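# Illustrative usage sketch (the sizes below are placeholders, not values from this project):
#   model = NeuralNet(input_size=54, hidden_size=8, num_classes=7)
#   logits = model(x)  # x: FloatTensor of shape (batch_size, 54); apply softmax/argmax outside the model if needed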
| [
"torch.nn.Linear",
"torch.nn.ReLU"
] | 1.10.1 | magalhaesdavi/ai-chatbot | 810740fbbd694539d6fd4ddc6f482e5d8f26b52d |
1.8 | import math
from functools import partial
from typing import Tuple
import torch
from torch.quantization.observer import _ObserverBase
from mqbench.utils import sync_tensor, pot_quantization, is_symmetric_quant
from mqbench.utils.logger import logger
class ObserverBase(_ObserverBase):
'''
Support per-tensor / per-channel.
    dtype: quant min/max can be inferred using dtype; we actually do not need this.
qscheme: quantization scheme
reduce_range: special for fbgemm to avoid overflow
quant_min: fix point value min
quant_max: fix point value max
ch_axis: per-channel axis or per-tensor(-1)
    The above is similar to the torch observer.
    pot_scale: indicates whether the scale is a power of two.
'''
min_val: torch.Tensor
max_val: torch.Tensor
def __init__(self, dtype=torch.quint8, qscheme=torch.per_tensor_affine,
reduce_range=False, quant_min=None, quant_max=None, ch_axis=-1, pot_scale=False,
factory_kwargs=None):
self.not_calc_quant_min_max = factory_kwargs.pop('not_calc_quant_min_max', False) if isinstance(factory_kwargs, dict) else False
super(ObserverBase, self).__init__(dtype, qscheme, reduce_range, quant_min, quant_max)
self.ch_axis = ch_axis
self.pot_scale = pot_scale
self.register_buffer("min_val", torch.tensor(float("inf")))
self.register_buffer("max_val", torch.tensor(float("-inf")))
class PerChannelLoadHook:
def __init__(self, module):
self.hook = module._register_load_state_dict_pre_hook(partial(self.hook_fn, module=module))
def hook_fn(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs,
module):
if module.ch_axis == -1:
# no per-channel parameters
return
for module_key, param in module._buffers.items():
if module_key not in ['min_val', 'max_val']:
continue
candidate = prefix + module_key
if candidate in state_dict:
input_param = state_dict[candidate]
if param.shape != input_param.shape:
param.data = torch.ones_like(input_param, dtype=param.dtype, device=param.device)
def close(self):
self.hook.remove()
self.load_state_dict_hook = PerChannelLoadHook(self)
@torch.jit.export
def calculate_qparams(self) -> Tuple[torch.Tensor, torch.Tensor]:
r"""Calculates the quantization parameters."""
scale, zero_point = self._calculate_qparams(self.min_val, self.max_val)
if self.pot_scale:
scale = pot_quantization(scale)
scale.data = sync_tensor(scale).data
zero_point.data = sync_tensor(zero_point).data
return scale, zero_point
@torch.jit.export
def _calculate_qmin_qmax(self) -> Tuple[int, int]:
r"""Calculates actual qmin and qmax based on the quantization range,
observer datatype and if range is reduced.
"""
if self.has_customized_qrange:
            # This initialization is here to resolve TorchScript compilation issues and to allow
            # using refinement to decouple initial_qmin and initial_qmax from the quantization range.
# The actual values of initial_qmin and initial_qmax will be reset below.
initial_quant_min, initial_quant_max = 0, 255
# The following assignment of self.qmin and self.qmax to the local variables and the if check refine the
# attribute from Optional valid integers for use, based on TorchScript's requirements.
custom_quant_min, custom_quant_max = self.quant_min, self.quant_max
if custom_quant_min is not None and custom_quant_max is not None:
initial_quant_min, initial_quant_max = (
custom_quant_min,
custom_quant_max,
)
qrange_len = initial_quant_max - initial_quant_min + 1
if is_symmetric_quant(self.qscheme):
quant_min, quant_max = -qrange_len // 2, qrange_len // 2 - 1
else:
quant_min, quant_max = 0, qrange_len - 1
if self.reduce_range:
quant_min, quant_max = quant_min // 2, quant_max // 2
if self.not_calc_quant_min_max:
quant_min, quant_max = self.quant_min, self.quant_max
else:
# Fallback onto default 8-bit qmin and qmax calculation if dynamic range is not used.
if self.dtype == torch.qint8:
if self.reduce_range:
quant_min, quant_max = -64, 63
else:
quant_min, quant_max = -128, 127
elif self.dtype == torch.quint8:
if self.reduce_range:
quant_min, quant_max = 0, 127
else:
quant_min, quant_max = 0, 255
else:
quant_min, quant_max = 0, 15
return quant_min, quant_max
@torch.jit.export
def extra_repr(self):
return "min_val={}, max_val={} ch_axis={} pot={}".format(self.min_val if self.ch_axis == -1 else 'List',
self.max_val if self.ch_axis == -1 else 'List',
self.ch_axis, self.pot_scale)
class MinMaxObserver(ObserverBase):
'''
Calculate minmax of whole calibration dataset.
'''
def __init__(self, dtype=torch.quint8, qscheme=torch.per_tensor_affine,
reduce_range=False, quant_min=None, quant_max=None, ch_axis=-1, pot_scale=False,
factory_kwargs=None):
super(MinMaxObserver, self).__init__(dtype, qscheme, reduce_range, quant_min, quant_max,
ch_axis, pot_scale, factory_kwargs)
def forward(self, x_orig):
r"""Records the running minimum and maximum of ``x``."""
if x_orig.numel() == 0:
return x_orig
x = x_orig.to(self.min_val.dtype)
if self.ch_axis == -1:
min_val_cur, max_val_cur = torch._aminmax(x)
else:
x_dim = x.size()
new_axis_list = [i for i in range(len(x_dim))]
new_axis_list[self.ch_axis] = 0
new_axis_list[0] = self.ch_axis
y = x.permute(new_axis_list)
y = torch.flatten(y, start_dim=1)
min_val_cur, max_val_cur = torch._aminmax(y, 1)
self.min_val = torch.min(self.min_val, min_val_cur)
self.max_val = torch.max(self.max_val, max_val_cur)
return x
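    # Illustrative usage sketch (follows the standard PyTorch observer protocol):
    #   obs = MinMaxObserver(dtype=torch.quint8, qscheme=torch.per_tensor_affine)
    #   obs(torch.randn(4, 8))                       # the forward pass records the running min/max
    #   scale, zero_point = obs.calculate_qparams()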
class MinMaxFloorObserver(ObserverBase):
'''
Calculate minmax of whole calibration dataset with floor but round.
'''
def __init__(self, dtype=torch.quint8, qscheme=torch.per_tensor_affine,
reduce_range=False, quant_min=None, quant_max=None, ch_axis=-1, pot_scale=False,
factory_kwargs=None):
super(MinMaxFloorObserver, self).__init__(dtype, qscheme, reduce_range, quant_min, quant_max,
ch_axis, pot_scale, factory_kwargs)
'''
The quant_type could be 'input', 'param', 'tensor', the co-responding
range is 1, 5, 5,
mth is 2, 3, 2
'''
self.quant_type = None
def forward(self, x_orig):
r"""Records the running minimum and maximum of ``x``."""
if x_orig.numel() == 0:
return x_orig
x = x_orig.to(self.min_val.dtype)
if self.ch_axis == -1:
min_val_cur, max_val_cur = torch._aminmax(x)
else:
logger.warn('The per-tensor observer does not support per-channel min-max!')
min_val_cur, max_val_cur = torch._aminmax(x)
self.min_val = min_val_cur
self.max_val = max_val_cur
self._x = x
return x
def calculate_qparams(self):
if self.quant_type is None:
raise ValueError('You should set the observer type before forward!')
else:
scale_range = 1 if self.quant_type == 'input' else 5
mth = 3 if self.quant_type == 'param' else 2
scale, zero_point = self._calculate_qparams(self.min_val, self.max_val)
scale.data = scale.data * 0 + max(self.min_val / self.quant_min, self.max_val / self.quant_max)
if scale < 2 ** -15:
max_scale = 0
else:
max_scale = 1 / scale
max_scale = torch.floor(max_scale.log2())
min_loss = torch.tensor([float('inf')])
final_scale = max_scale
max_scale = int(max_scale)
for s in range(max_scale, max_scale + scale_range):
_s = 1 / 2 ** s
if mth == 3:
new_x = _s * torch.clamp(torch.round(self._x / _s), self.quant_min, self.quant_max)
elif mth == 2:
new_x = torch.clamp(self._x / _s, self.quant_min, self.quant_max)
new_x = torch.where((new_x < 0) & (new_x - new_x.floor() == 0.5), new_x.ceil(), new_x.round())
new_x *= _s
loss = ((new_x - self._x)**2).sum()
min_loss = min_loss.to(loss.device)
if loss < min_loss:
min_loss = loss
final_scale = s
final_scale = min(final_scale, 12)
scale = scale.data * 0 + 1 / (2 ** final_scale)
zero_point = torch.zeros_like(zero_point)
if not is_symmetric_quant(self.qscheme):
if self.min_val >= 0.:
zero_point = self.quant_min - torch.round(self.min_val / scale)
sync_tensor(scale)
sync_tensor(zero_point)
return scale, zero_point
def set_quant_type(self, qtype):
self.quant_type = qtype
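# --- Hedged illustration (added; not part of the original source) ---
# Condensed standalone version of the power-of-two scale search performed by
# MinMaxFloorObserver.calculate_qparams, assuming a symmetric int8 range and
# the rounding variant (mth == 3).
def _sketch_pot_scale_search(x, quant_min=-128, quant_max=127, scale_range=5):
    base = max(x.min().abs(), x.max().abs()) / quant_max
    start = int(torch.floor((1.0 / base).log2())) if base > 2 ** -15 else 0
    best_s, best_loss = start, float('inf')
    for s in range(start, start + scale_range):
        step = 1.0 / 2 ** s
        x_q = step * torch.clamp(torch.round(x / step), quant_min, quant_max)
        loss = ((x_q - x) ** 2).sum().item()
        if loss < best_loss:
            best_loss, best_s = loss, s
    return 1.0 / 2 ** min(best_s, 12)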
class EMAMinMaxObserver(ObserverBase):
"""Moving average min/max among batches.
"""
def __init__(self, dtype=torch.quint8, qscheme=torch.per_tensor_affine, reduce_range=False,
quant_min=None, quant_max=None, ch_axis=-1, pot_scale=False, ema_ratio=0.9,
factory_kwargs=None):
super(EMAMinMaxObserver, self).__init__(dtype, qscheme, reduce_range, quant_min, quant_max,
ch_axis, pot_scale, factory_kwargs)
self.ema_ratio = ema_ratio
def forward(self, x_orig):
r"""Records the running minimum and maximum of ``x``."""
if x_orig.numel() == 0:
return x_orig
x = x_orig.to(self.min_val.dtype)
if self.ch_axis == -1:
min_val_cur, max_val_cur = torch._aminmax(x)
else:
x_dim = x.size()
new_axis_list = [i for i in range(len(x_dim))] # noqa: C416
new_axis_list[self.ch_axis] = 0
new_axis_list[0] = self.ch_axis
y = x.permute(new_axis_list)
y = torch.flatten(y, start_dim=1)
min_val_cur, max_val_cur = torch._aminmax(y, 1)
if self.max_val.numel() <= 1 and self.max_val.isinf():
self.min_val = min_val_cur
self.max_val = max_val_cur
else:
self.min_val = self.min_val * self.ema_ratio + min_val_cur * (1.0 - self.ema_ratio)
self.max_val = self.max_val * self.ema_ratio + max_val_cur * (1.0 - self.ema_ratio)
return x
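# --- Hedged illustration (added; not part of the original source) ---
# The exponential moving average used above in isolation: the first batch
# initializes the statistic, later batches blend in with weight (1 - ratio).
def _sketch_ema_update(old, cur, ratio=0.9):
    if old.numel() <= 1 and old.isinf():
        return cur
    return old * ratio + cur * (1.0 - ratio)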
class EMAMinMaxFloorObserver(ObserverBase):
"""Moving average min/max among batches.
"""
def __init__(self, dtype=torch.quint8, qscheme=torch.per_tensor_affine, reduce_range=False,
quant_min=None, quant_max=None, ch_axis=-1, pot_scale=False, ema_ratio=0.9,
factory_kwargs=None):
super(EMAMinMaxFloorObserver, self).__init__(dtype, qscheme, reduce_range, quant_min, quant_max,
ch_axis, pot_scale, factory_kwargs)
self.ema_ratio = ema_ratio
self.quant_type = None
def forward(self, x_orig):
r"""Records the running minimum and maximum of ``x``."""
if x_orig.numel() == 0:
return x_orig
x = x_orig.to(self.min_val.dtype)
self._x = x
if self.ch_axis == -1:
min_val_cur, max_val_cur = torch._aminmax(x)
else:
            logger.warning('The per-tensor observer does not support per-channel min-max!')
min_val_cur, max_val_cur = torch._aminmax(x)
if self.max_val.numel() <= 1 and self.max_val.isinf():
self.min_val = min_val_cur
self.max_val = max_val_cur
else:
self.min_val = self.min_val * self.ema_ratio + min_val_cur * (1.0 - self.ema_ratio)
self.max_val = self.max_val * self.ema_ratio + max_val_cur * (1.0 - self.ema_ratio)
return x
def calculate_qparams(self):
if self.quant_type is None:
            raise ValueError('You should set the quant_type (via set_quant_type) before calculating qparams!')
else:
scale_range = 1 if self.quant_type == 'input' else 5
mth = 3 if self.quant_type == 'param' else 2
scale, zero_point = self._calculate_qparams(self.min_val, self.max_val)
scale.data = scale.data * 0 + max(self.min_val / self.quant_min, self.max_val / self.quant_max)
if scale < 2 ** -15:
max_scale = 0
else:
max_scale = 1 / scale
max_scale = torch.floor(max_scale.log2())
min_loss = torch.tensor([float('inf')])
final_scale = max_scale
max_scale = int(max_scale)
for s in range(max_scale, max_scale + scale_range):
_s = 1 / 2 ** s
if mth == 3:
new_x = _s * torch.clamp(torch.round(self._x / _s), self.quant_min, self.quant_max)
elif mth == 2:
new_x = torch.clamp(self._x / _s, self.quant_min, self.quant_max)
new_x = torch.where((new_x < 0) & (new_x - new_x.floor() == 0.5), new_x.ceil(), new_x.round())
new_x *= _s
loss = ((new_x - self._x)**2).sum()
min_loss = min_loss.to(loss.device)
if loss < min_loss:
min_loss = loss
final_scale = s
final_scale = min(final_scale, 12)
scale = scale.data * 0 + 1 / (2 ** final_scale)
zero_point = torch.zeros_like(zero_point)
if not is_symmetric_quant(self.qscheme):
if self.min_val >= 0.:
zero_point = self.quant_min - torch.round(self.min_val / scale)
sync_tensor(scale)
sync_tensor(zero_point)
return scale, zero_point
def set_quant_type(self, qtype):
self.quant_type = qtype
class EMAQuantileObserver(ObserverBase):
"""Moving average quantile among batches.
"""
def __init__(self, dtype=torch.quint8, qscheme=torch.per_tensor_affine, reduce_range=False,
quant_min=None, quant_max=None, ch_axis=-1, pot_scale=False, ema_ratio=0.9,
threshold=0.99999, bins=2048, factory_kwargs=None):
super(EMAQuantileObserver, self).__init__(dtype, qscheme, reduce_range, quant_min, quant_max,
ch_axis, pot_scale, factory_kwargs)
        assert self.ch_axis == -1, "Quantile observer only supports the per-tensor scheme."
self.ema_ratio = ema_ratio
self.threshold = threshold
self.bins = bins
def forward(self, x_orig):
r"""Records the running minimum and maximum of ``x``."""
if x_orig.numel() == 0:
return x_orig
x = x_orig.to(self.min_val.dtype)
min_val_cur, max_val_cur = torch._aminmax(x)
hist = torch.histc(torch.abs(x), bins=self.bins, min=0., max=torch.max(-min_val_cur, max_val_cur))
cur_total = 0
clip_value = torch.max(-min_val_cur, max_val_cur)
for i, cnt in enumerate(hist):
if cur_total + cnt >= self.threshold * x.numel():
clip_value = (i + 0.5) * (max_val_cur / self.bins)
break
if self.max_val.numel() <= 1 and self.max_val.isinf():
self.min_val = max(min_val_cur, -clip_value)
self.max_val = min(max_val_cur, clip_value)
else:
self.min_val = self.min_val * self.ema_ratio + max(min_val_cur, -clip_value) * (1.0 - self.ema_ratio)
self.max_val = self.max_val * self.ema_ratio + min(max_val_cur, clip_value) * (1.0 - self.ema_ratio)
return x
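# --- Hedged illustration (added; not part of the original source) ---
# Histogram-based clipping in the spirit of the quantile observer above:
# keep the smallest absolute value that covers `threshold` of the elements.
def _sketch_quantile_clip(x, threshold=0.99999, bins=2048):
    min_val, max_val = x.min(), x.max()
    clip_value = torch.max(-min_val, max_val)
    hist = torch.histc(x.abs(), bins=bins, min=0., max=clip_value.item())
    total = 0.
    for i, cnt in enumerate(hist):
        total += cnt.item()
        if total >= threshold * x.numel():
            clip_value = torch.tensor((i + 0.5) * clip_value.item() / bins)
            break
    return torch.max(min_val, -clip_value), torch.min(max_val, clip_value)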
class ClipStdObserver(ObserverBase):
"""Clip std.
"""
def __init__(self, dtype=torch.quint8, qscheme=torch.per_tensor_affine, reduce_range=False,
quant_min=None, quant_max=None, ch_axis=-1, pot_scale=False, std_scale=2.6,
factory_kwargs=None):
super(ClipStdObserver, self).__init__(dtype, qscheme, reduce_range, quant_min, quant_max,
ch_axis, pot_scale, factory_kwargs=None)
self.std_scale = std_scale
def forward(self, x_orig):
r"""Records the running minimum and maximum of ``x``."""
if x_orig.numel() == 0:
return x_orig
x = x_orig.to(self.min_val.dtype)
if self.ch_axis == -1:
min_val_cur, max_val_cur = torch._aminmax(x)
mean = x.mean()
std = x.std()
else:
x_dim = x.size()
new_axis_list = [i for i in range(len(x_dim))]
new_axis_list[self.ch_axis] = 0
new_axis_list[0] = self.ch_axis
y = x.permute(new_axis_list)
y = torch.flatten(y, start_dim=1)
min_val_cur, max_val_cur = torch._aminmax(y, 1)
mean = y.mean(1)
std = y.std(1)
# using statistics to clip min and max
min_val = torch.minimum(mean - self.std_scale * std, min_val_cur)
max_val = torch.maximum(mean + self.std_scale * std, max_val_cur)
self.min_val = min_val
self.max_val = max_val
return x
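# --- Hedged illustration (added; not part of the original source) ---
# The statistic-based range above in isolation: candidate bounds come from
# mean +/- std_scale * std and are combined with the raw min/max.
def _sketch_std_range(x, std_scale=2.6):
    mean, std = x.mean(), x.std()
    min_val = torch.minimum(mean - std_scale * std, x.min())
    max_val = torch.maximum(mean + std_scale * std, x.max())
    return min_val, max_val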
class LSQObserver(ObserverBase):
'''
LSQ observer.
'''
def __init__(self, dtype=torch.quint8, qscheme=torch.per_tensor_affine, reduce_range=False,
quant_min=None, quant_max=None, ch_axis=-1, pot_scale=False, factory_kwargs=None):
super(LSQObserver, self).__init__(dtype, qscheme, reduce_range, quant_min, quant_max,
ch_axis, pot_scale, factory_kwargs)
self.tensor_norm = None
def forward(self, x_orig):
if x_orig.numel() == 0:
return x_orig
x = x_orig.to(self.min_val.dtype)
if self.ch_axis == -1:
self.tensor_norm = x.abs().mean()
self.min_val, self.max_val = torch._aminmax(x)
else:
# compute channel-wise mean
x_dim = x.size()
new_axis_list = [i for i in range(len(x_dim))]
new_axis_list[self.ch_axis] = 0
new_axis_list[0] = self.ch_axis
y = x.permute(new_axis_list)
y = torch.flatten(y, start_dim=1)
self.tensor_norm = y.abs().mean(1)
self.min_val, self.max_val = torch._aminmax(y)
return x
def calculate_qparams(self):
scale = 2 * self.tensor_norm / math.sqrt(self.quant_max)
if self.pot_scale:
scale = pot_quantization(scale)
zero_point = torch.zeros_like(self.tensor_norm)
if not is_symmetric_quant(self.qscheme):
if self.min_val >= 0.:
zero_point = self.quant_min - torch.round(self.min_val / scale)
sync_tensor(scale)
sync_tensor(zero_point)
return scale, zero_point
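# --- Hedged illustration (added; not part of the original source) ---
# LSQ-style step-size initialization used above: scale = 2 * E[|x|] / sqrt(quant_max),
# with a zero offset for symmetric schemes.
def _sketch_lsq_init_scale(x, quant_max=127):
    return 2 * x.abs().mean() / math.sqrt(quant_max)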
class LSQPlusObserver(ObserverBase):
'''
LSQ+ observer.
'''
def __init__(self, dtype=torch.quint8, qscheme=torch.per_tensor_affine, reduce_range=False,
quant_min=None, quant_max=None, ch_axis=-1, pot_scale=False, factory_kwargs=None):
super(LSQPlusObserver, self).__init__(dtype, qscheme, reduce_range, quant_min, quant_max,
ch_axis, pot_scale, factory_kwargs)
self.mean = None
self.std = None
def forward(self, x_orig):
if x_orig.numel() == 0:
return x_orig
x = x_orig.to(self.min_val.dtype)
if self.ch_axis == -1:
self.mean = x.mean()
self.std = x.std()
self.min_val, self.max_val = torch._aminmax(x)
else:
# compute channel-wise mean
x_dim = x.size()
new_axis_list = [i for i in range(len(x_dim))] # noqa: C416
new_axis_list[self.ch_axis] = 0
new_axis_list[0] = self.ch_axis
y = x.permute(new_axis_list)
y = torch.flatten(y, start_dim=1)
self.mean = y.mean(1)
self.std = y.std(1)
self.min_val, self.max_val = torch._aminmax(y)
return x
def calculate_qparams(self):
scale = torch.maximum((self.mean - 3 * self.std).abs(),
(self.mean + 3 * self.std).abs()) / (self.quant_max - self.quant_min + 1)
if self.pot_scale:
scale = pot_quantization(scale)
zero_point = torch.zeros_like(self.mean)
if not is_symmetric_quant(self.qscheme):
if self.min_val >= 0.:
zero_point = self.quant_min - torch.round(self.min_val / scale)
sync_tensor(scale)
sync_tensor(zero_point)
return scale, zero_point
class MSEObserver(ObserverBase):
'''
    Search the min/max over the whole calibration dataset by minimizing the quantization MSE.
'''
def __init__(self, dtype=torch.quint8, qscheme=torch.per_tensor_affine,
reduce_range=False, quant_min=None, quant_max=None, ch_axis=-1, pot_scale=False, p=2.0,
factory_kwargs=None):
super(MSEObserver, self).__init__(dtype, qscheme, reduce_range, quant_min, quant_max,
ch_axis, pot_scale, factory_kwargs)
self.p = p
def lp_loss(self, pred, tgt):
"""
loss function measured in L_p Norm
"""
return (pred - tgt).abs().pow(self.p).mean()
def mse(self, x: torch.Tensor, x_min: torch.Tensor, x_max: torch.Tensor, iter=80):
best_score = 1e+10
best_min, best_max = torch.tensor([1.0], dtype=torch.float), torch.tensor([1.0], dtype=torch.float)
best_min.copy_(x_min)
best_max.copy_(x_max)
for i in range(iter):
new_min = x_min * (1.0 - (i * 0.01))
new_max = x_max * (1.0 - (i * 0.01))
scale, zero_point = self._calculate_qparams(new_min, new_max)
x_q = torch.fake_quantize_per_tensor_affine(
x, scale.item(), int(zero_point.item()),
self.quant_min, self.quant_max)
score = self.lp_loss(x_q, x)
if score < best_score:
best_score = score
best_min, best_max = new_min, new_max
return best_min, best_max
def forward(self, x_orig):
r"""Records the running minimum and maximum of ``x``."""
if x_orig.numel() == 0:
return x_orig
x = x_orig.clone().detach().to(self.min_val.dtype)
if self.ch_axis == -1:
min_val_cur, max_val_cur = torch._aminmax(x)
min_val_cur, max_val_cur = self.mse(x, min_val_cur, max_val_cur, iter=95)
else:
x_dim = x.size()
new_axis_list = [i for i in range(len(x_dim))]
new_axis_list[self.ch_axis] = 0
new_axis_list[0] = self.ch_axis
x_channel = x.permute(new_axis_list)
y = torch.flatten(x_channel, start_dim=1)
min_val_cur, max_val_cur = torch._aminmax(y, 1)
for ch, val in enumerate(min_val_cur):
min_val_cur[ch], max_val_cur[ch] = self.mse(x_channel[ch], min_val_cur[ch], max_val_cur[ch], iter=80)
self.min_val = torch.min(self.min_val, min_val_cur)
self.max_val = torch.max(self.max_val, max_val_cur)
return x
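# --- Hedged illustration (added; not part of the original source) ---
# Standalone version of the range search above: shrink the candidate range by
# 1% per step and keep the pair whose fake-quantized tensor has the lowest
# squared error (p = 2, int8 range assumed).
def _sketch_mse_range_search(x, iters=95, quant_min=-128, quant_max=127):
    x_min, x_max = x.min(), x.max()
    best = (float('inf'), x_min, x_max)
    for i in range(iters):
        new_min, new_max = x_min * (1.0 - i * 0.01), x_max * (1.0 - i * 0.01)
        scale = torch.clamp((new_max - new_min) / (quant_max - quant_min), min=1e-8)
        zero_point = int(torch.clamp(quant_min - torch.round(new_min / scale), quant_min, quant_max))
        x_q = torch.fake_quantize_per_tensor_affine(x, scale.item(), zero_point, quant_min, quant_max)
        score = ((x_q - x) ** 2).mean().item()
        if score < best[0]:
            best = (score, new_min, new_max)
    return best[1], best[2]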
class EMAMSEObserver(ObserverBase):
'''
    Search the min/max by minimizing the quantization MSE, smoothed with a moving average across batches.
'''
def __init__(self, dtype=torch.quint8, qscheme=torch.per_tensor_affine,
reduce_range=False, quant_min=None, quant_max=None, ch_axis=-1, pot_scale=False,
p=2.0, ema_ratio=0.9, factory_kwargs=None):
super(EMAMSEObserver, self).__init__(dtype, qscheme, reduce_range, quant_min, quant_max,
ch_axis, pot_scale, factory_kwargs)
self.ema_ratio = ema_ratio
self.p = p
def lp_loss(self, pred, tgt):
"""
loss function measured in L_p Norm
"""
return (pred - tgt).abs().pow(self.p).mean()
def mse(self, x: torch.Tensor, x_min: torch.Tensor, x_max: torch.Tensor, iter=80):
best_score = 1e+10
best_min, best_max = torch.tensor([1.0], dtype=torch.float), torch.tensor([1.0], dtype=torch.float)
best_min.copy_(x_min)
best_max.copy_(x_max)
for i in range(iter):
new_min = x_min * (1.0 - (i * 0.01))
new_max = x_max * (1.0 - (i * 0.01))
scale, zero_point = self._calculate_qparams(new_min, new_max)
x_q = torch.fake_quantize_per_tensor_affine(
x, scale.item(), int(zero_point.item()),
self.quant_min, self.quant_max)
score = self.lp_loss(x_q, x)
if score < best_score:
best_score = score
best_min, best_max = new_min, new_max
return best_min, best_max
def forward(self, x_orig):
r"""Records the running minimum and maximum of ``x``."""
if x_orig.numel() == 0:
return x_orig
x = x_orig.clone().detach().to(self.min_val.dtype)
if self.ch_axis == -1:
min_val_cur, max_val_cur = torch._aminmax(x)
min_val_cur, max_val_cur = self.mse(x, min_val_cur, max_val_cur, iter=95)
else:
x_dim = x.size()
new_axis_list = [i for i in range(len(x_dim))]
new_axis_list[self.ch_axis] = 0
new_axis_list[0] = self.ch_axis
x_channel = x.permute(new_axis_list)
y = torch.flatten(x_channel, start_dim=1)
min_val_cur, max_val_cur = torch._aminmax(y, 1)
for ch, val in enumerate(min_val_cur):
min_val_cur[ch], max_val_cur[ch] = self.mse(x_channel[ch], min_val_cur[ch],
max_val_cur[ch], iter=80)
if self.max_val.numel() <= 1 and self.max_val.isinf():
self.min_val = min_val_cur
self.max_val = max_val_cur
else:
self.min_val = self.min_val * self.ema_ratio + min_val_cur * (1.0 - self.ema_ratio)
self.max_val = self.max_val * self.ema_ratio + max_val_cur * (1.0 - self.ema_ratio)
return x
| [
"torch.round",
"torch.min",
"torch.max",
"torch.maximum",
"torch.minimum",
"torch.clamp",
"torch.abs",
"torch.tensor",
"torch._aminmax",
"torch.ones_like",
"torch.zeros_like",
"torch.flatten"
] | 1.8.1 | thb1314/mqbench-openvino | 476d64a18a009fa5c001895343929c0332224e1a |
1.2 | from typing import Tuple, Callable, Union, Type, List, Dict, Any
from itertools import combinations
import torch
import torch.nn as nn
import torch.nn.functional as F
from mars_gym.torch.init import lecun_normal_init
from mars_gym.model.abstract import RecommenderModule
import numpy as np
from mars_gym.meta_config import ProjectConfig, IOType, Column
class LogisticRegression(RecommenderModule):
def __init__(
self,
project_config: ProjectConfig,
index_mapping: Dict[str, Dict[Any, int]],
n_factors: int,
weight_init: Callable = lecun_normal_init,
):
super().__init__(project_config, index_mapping)
self.user_embeddings = nn.Embedding(self._n_users, n_factors)
self.item_embeddings = nn.Embedding(self._n_items, n_factors)
weight_init(self.user_embeddings.weight)
weight_init(self.item_embeddings.weight)
self.linear = nn.Linear(n_factors * 2, 1)
self.weight_init = weight_init
self.apply(self.init_weights)
def init_weights(self, module: nn.Module):
if type(module) == nn.Linear:
self.weight_init(module.weight)
module.bias.data.fill_(0.1)
    def none_tensor(self, x):
        return x is None
def forward(self, user_ids, item_ids, context_representation=None):
x: torch.Tensor = context_representation
# Geral embs
user_emb = self.user_embeddings(user_ids)
item_emb = self.item_embeddings(item_ids)
x = torch.cat((user_emb, item_emb), dim=1,)
return torch.sigmoid(self.linear(x))
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.Embedding"
] | 1.2 | engsoares/mars-gym | c58605180a3f5fb16edbe8bd8954095b9f00a446 |
1.2 | import torch
import torch.nn as nn
import os
import torchvision
import torchvision.transforms as transforms
from tqdm import tqdm as pbar
import numpy as np
import time
from tqdm import tqdm
import scipy.linalg
import random
from random import shuffle
is_print = True
def _print(*args, **kwargs):
if is_print:
print(*args, **kwargs)
rand_mat = None
cashed = {}
def flatten(x):
return x.view(-1)
def make_dataloaders(params, raw=False, train=False, merge=False, permutate=False, train_size=1, map_dic=None):
if map_dic is None:
if merge:
label_vals = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
shuffle(label_vals)
map_dic = {k: v for k, v in zip(range(10), label_vals)}
else:
map_dic = {i: i for i in range(10)}
if raw:
transform_train = transforms.Compose([transforms.ToTensor()])
transform_validation = transforms.Compose([transforms.ToTensor()])
else:
if train:
transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])])
else:
transform_train = transforms.Compose([transforms.ToTensor(),
transforms.Normalize([0.4914, 0.4822, 0.4465],
[0.2023, 0.1994, 0.2010])])
transform_validation = transforms.Compose([transforms.ToTensor(),
transforms.Normalize([0.4914, 0.4822, 0.4465],
[0.2023, 0.1994, 0.2010])])
    if permutate:
        def randomize(_):
            # Map every label to a uniformly random class in [0, 9].
            return random.randint(0, 9)
        target_transform = np.vectorize(randomize)
    else:
        target_transform = np.vectorize(map_dic.get)
    trainset = torchvision.datasets.CIFAR10(root=params['path'], train=True, transform=transform_train, target_transform=target_transform, download=True)
    testset = torchvision.datasets.CIFAR10(root=params['path'], train=False, transform=transform_validation, target_transform=target_transform)
if train_size == 1:
trainloader = torch.utils.data.DataLoader(trainset, batch_size=params['batch_size'], shuffle=train)
train_idxs = np.arange(len(trainset))
else:
train_idxs = np.random.permutation(np.arange(len(trainset)))[:int(len(trainset) * train_size)]
        # Avoid shadowing the `train` flag with the subset list.
        train_subset = [trainset[i] for i in train_idxs]
        trainloader = torch.utils.data.DataLoader(train_subset, batch_size=params['batch_size'], shuffle=train)
testloader = torch.utils.data.DataLoader(testset, batch_size=params['batch_size'], shuffle=False)
return trainloader, testloader, (train_idxs, map_dic)
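# --- Hedged usage sketch (added; not part of the original source). The path,
# batch size, and subset fraction below are assumptions, not repo values. ---
def _example_build_loaders():
    params = {'path': './data', 'batch_size': 128, 'device': 'cpu'}
    trainloader, testloader, (train_idxs, map_dic) = make_dataloaders(
        params, train=True, merge=False, permutate=False, train_size=0.1)
    return trainloader, testloader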
class ConvBNReLU(nn.Sequential):
def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
padding = (kernel_size - 1) // 2
super(ConvBNReLU, self).__init__(
nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False),
nn.BatchNorm2d(out_planes),
nn.ReLU6(inplace=True)
)
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, stride, expand_ratio):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
hidden_dim = int(round(inp * expand_ratio))
self.use_res_connect = self.stride == 1 and inp == oup
layers = []
if expand_ratio != 1:
# pw
layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
layers.extend([
# dw
ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
])
self.conv = nn.Sequential(*layers)
def forward(self, x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
class MobileNetV2(nn.Module):
def __init__(self, num_classes=10, width_mult=1.0):
super(MobileNetV2, self).__init__()
block = InvertedResidual
input_channel = 32
last_channel = 1280
## CIFAR10
inverted_residual_setting = [
# t, c, n, s
[1, 16, 1, 1],
[6, 24, 2, 1], # Stride 2 -> 1 for CIFAR-10
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1],
]
## END
# building first layer
input_channel = int(input_channel * width_mult)
self.last_channel = int(last_channel * max(1.0, width_mult))
# CIFAR10: stride 2 -> 1
features = [ConvBNReLU(3, input_channel, stride=1)]
# END
# building inverted residual blocks
for t, c, n, s in inverted_residual_setting:
output_channel = int(c * width_mult)
for i in range(n):
stride = s if i == 0 else 1
features.append(block(input_channel, output_channel, stride, expand_ratio=t))
input_channel = output_channel
# building last several layers
features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1))
# make it nn.Sequential
self.features = nn.Sequential(*features)
# building classifier
self.classifier = nn.Sequential(
nn.Dropout(0.2),
nn.Linear(self.last_channel, num_classes),
)
# weight initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm2d):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.zeros_(m.bias)
def forward(self, x):
x = self.features(x)
x = x.mean([2, 3])
x = self.classifier(x)
return x
def get_x(self, x):
return x.view(x.size()[0], -1).cpu().numpy()
def get_top_h(self, x):
x = self.features(x)
x = x.mean([2, 3])
return x.cpu().numpy()
def get_all_h(self, x):
tmp = x
tmps = []
for layer in self.features:
tmp = layer(tmp)
tmps.append(tmp.view(tmp.size()[0], -1))
tmp = tmp.mean([2, 3])
tmps.append(tmp.view(tmp.size()[0], -1))
all_h = torch.cat(tmps, dim=1)
return all_h.cpu().numpy()
def get_flat_param(self):
return torch.cat([flatten(p) for p in self.parameters()])
def get_flat_param_grad(self):
return torch.cat([flatten(p.grad) for p in self.parameters()])
# def get_flat_param_grad_var(self):
# return torch.cat([flatten(p.grad_var) for p in self.parameters()])
def get_grad_loss(self, test_data):
criterion = nn.CrossEntropyLoss()
image, label = test_data
image = image.to(self.device)
label = label.to(self.device)
logit = self.forward(image)
loss = criterion(logit, label)
self.zero_grad()
loss.backward()
grad = self.get_flat_param_grad()
return grad.cpu().numpy()
def get_grad_output(self, test_data):
image, label = test_data
image = image.to(self.device)
logit = self.forward(image)
        preds = nn.functional.softmax(logit, dim=1)
assert len(preds) == 1
preds = preds[0]
grads = []
for p in preds:
self.zero_grad()
p.backward(retain_graph=True)
grad = self.get_flat_param_grad()
grads.append(grad.cpu().numpy())
return np.vstack(grads)
def get_influence_on_test_loss(self, test_data, train_data, test_description='',
approx_type='cg', approx_params={}, force_refresh=False,
matrix='none', sim_func=np.dot):
self.train_data = train_data
test_grad = self.get_grad_loss(test_data).astype(np.float32)
if matrix == 'none':
inverse_hvp = test_grad
else:
assert False
start_time = time.time()
predicted_loss_diffs = []
train_grad_filename = os.path.join(self.out_dir, 'train-grad-on-loss-all.npy')
if train_grad_filename in cashed:
train_grads = cashed[train_grad_filename]
elif os.path.exists(train_grad_filename):
train_grads = np.load(train_grad_filename).astype(np.float32)
_print('Loaded train grads from {}'.format(train_grad_filename))
else:
train_grad_list = []
for counter, remove_data in enumerate(tqdm(train_data)):
train_grad = self.get_grad_loss(remove_data)
train_grad_list.append(train_grad.astype(np.float16))
train_grads = np.vstack(train_grad_list)
np.save(train_grad_filename, train_grads)
_print('Saved train grads to {}'.format(train_grad_filename))
if train_grad_filename not in cashed:
cashed[train_grad_filename] = train_grads
for counter, train_grad in enumerate(tqdm(train_grads)):
predicted_loss_diffs.append(sim_func(inverse_hvp, train_grad))
duration = time.time() - start_time
_print('Multiplying by all train examples took {} sec'.format(duration))
return predicted_loss_diffs
def get_influence_by_hsim(self, test_data, train_data, h_fn, batch_size=100, sim_func=np.dot):
with torch.no_grad():
if h_fn == 'top_h':
h_func = self.get_top_h
elif h_fn == 'all_h':
h_func = self.get_all_h
elif h_fn == 'x':
h_func = self.get_x
else:
assert False
image, label = test_data
image = image.to(self.device)
test_feature = h_func(image).astype(np.float32)[0]
feature_sims = []
train_feature_filename = os.path.join(self.out_dir, 'train-{}-all.npy'.format(h_fn))
if train_feature_filename in cashed:
train_features = cashed[train_feature_filename]
elif os.path.exists(train_feature_filename):
train_features = np.load(train_feature_filename).astype(np.float32)
_print('Loaded train features from {}'.format(train_feature_filename))
else:
train_feature_list = []
for batch in tqdm(train_data):
image, label = batch
image = image.to(self.device)
features = h_func(image).astype(np.float32)
train_feature_list.append(features)
train_features = np.vstack(train_feature_list)
np.save(train_feature_filename, train_features)
_print('Saved train features to {}'.format(train_feature_filename))
if train_feature_filename not in cashed:
cashed[train_feature_filename] = train_features
for counter, train_feature in enumerate(tqdm(train_features)):
feature_sims.append(sim_func(test_feature, train_feature))
return feature_sims
def mobilenet_v2(pretrained=False, progress=True, device='cpu', model_path='', **kwargs):
model = MobileNetV2(**kwargs)
if pretrained:
script_dir = os.path.dirname(__file__)
state_dict = torch.load(model_path, map_location=device)
model.load_state_dict(state_dict)
return model
def test_model(model, params):
model = model.to(params['device']).eval()
phase = 'validation'
logs = {'Accuracy': 0.0}
# Iterate over data
for image, label in pbar(params[phase + '_loader']):
image = image.to(params['device'])
label = label.to(params['device'])
prediction = model(image)
accuracy = torch.sum(torch.max(prediction, 1)[1] == label.data).item()
logs['Accuracy'] += accuracy
logs['Accuracy'] /= len(params[phase + '_loader'].dataset)
return logs['Accuracy']
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.BatchNorm2d",
"torch.nn.init.kaiming_normal_",
"torch.load",
"torch.nn.CrossEntropyLoss",
"torch.nn.init.normal_",
"torch.utils.data.DataLoader",
"torch.nn.init.zeros_",
"torch.max",
"torch.nn.Sequential",
"torch.nn.Conv2d",
"torch.nn.functional.softmax",
"torch.nn.Dropout",
"torch.no_grad",
"torch.nn.init.ones_",
"torch.nn.ReLU6"
] | 1.2.0 | k-hanawa/criteria_for_instance_based_explanation | bb6ae19a9164748e1fac08f8a7a1ad0adf28a94c |
1.6 | # -*- coding: utf-8 -*-
import unittest
from argparse import Namespace
from io import StringIO
from comet.models import CometRanker
from comet.models.utils import average_pooling, max_pooling
import torch
class TestCometRanker(unittest.TestCase):
hparams = Namespace(
**{"encoder_model": "LASER", "pretrained_model": None, "nr_frozen_epochs": 0}
)
ranker = CometRanker(hparams)
def test_read_csv(self):
csv_file = StringIO(
"src,ref,pos,neg\n" "ola mundo,hi world,hey world,hey world!\n"
)
expected = [
{
"src": "ola mundo",
"ref": "hi world",
"pos": "hey world",
"neg": "hey world!",
}
]
data = self.ranker.read_csv(csv_file)
self.assertListEqual(data, expected)
# Test ignore extra columns
csv_file = StringIO(
"src,ref,pos,neg,id\n" "ola mundo,hi world,hey world,hey world!,10293\n"
)
data = self.ranker.read_csv(csv_file)
self.assertListEqual(data, expected)
def test_compute_metrics(self):
dummy_outputs = [
{
"val_prediction": {
"src_sentemb": torch.tensor(
[[0.4963, 0.7682, 0.0885], [0.1320, 0.3074, 0.6341]]
),
"ref_sentemb": torch.tensor(
[[0.4901, 0.8964, 0.4556], [0.6323, 0.3489, 0.4017]]
),
"pos_sentemb": torch.tensor(
[[0.0223, 0.1689, 0.2939], [0.5185, 0.6977, 0.8000]]
),
"neg_sentemb": torch.tensor(
[[0.1610, 0.2823, 0.6816], [0.9152, 0.3971, 0.8742]]
),
}
}
]
# Distance from positive embedding to source: [0.7912, 0.5738]
# Distance from positive embedding to reference: [0.8800, 0.5415]
# Harmonic mean: [0.8332, 0.5572]
# Distance from negative embedding to source: [0.8369, 0.8240]
# Distance from positive embedding to reference: [0.7325, 0.5528]
# Harmonic mean: [0.7812, 0.6617]
expected = {"kendall": torch.tensor(0.0)}
metrics = self.ranker.compute_metrics(dummy_outputs)
self.assertDictEqual(metrics, expected)
def test_prepare_sample_to_forward(self):
""" Test compatability between prepare_sample and forward functions. """
sample = [
{
"src": "hello world",
"ref": "ola mundo",
"pos": "ola mundo",
"neg": "oi mundo",
}
]
model_input, target = self.ranker.prepare_sample(sample)
model_output = self.ranker(**model_input)
self.assertTrue(model_output["src_sentemb"].shape[0] == 1)
self.assertTrue(model_output["ref_sentemb"].shape[0] == 1)
self.assertTrue(model_output["pos_sentemb"].shape[0] == 1)
self.assertTrue(model_output["neg_sentemb"].shape[0] == 1)
self.assertTrue(model_output["src_sentemb"].shape[1] == 1024)
self.assertTrue(model_output["ref_sentemb"].shape[1] == 1024)
self.assertTrue(model_output["pos_sentemb"].shape[1] == 1024)
self.assertTrue(model_output["neg_sentemb"].shape[1] == 1024)
def test_get_sentence_embedding(self):
self.ranker.scalar_mix = None
self.ranker.layer = 0
# tokens from ["hello world", "how are your?"]
tokens = torch.tensor([[29733, 4139, 1, 1], [2231, 137, 57374, 8]])
lengths = torch.tensor([2, 4])
encoder_out = self.ranker.encoder(tokens, lengths)
# Expected sentence output with pool = 'default'
hparams = Namespace(**{"encoder_model": "LASER", "pool": "default"})
self.ranker.hparams = hparams
expected = encoder_out["sentemb"]
sentemb = self.ranker.get_sentence_embedding(tokens, lengths)
self.assertTrue(torch.equal(sentemb, expected))
# Expected sentence output with pool = 'max'
hparams = Namespace(
# LASER always used default... we need to pretend our encoder is another one
**{"encoder_model": "other", "pool": "max"}
)
self.ranker.hparams = hparams
# Max pooling is tested in test_utils.py
expected = max_pooling(tokens, encoder_out["wordemb"], 1)
sentemb = self.ranker.get_sentence_embedding(tokens, lengths)
self.assertTrue(torch.equal(sentemb, expected))
# Expected sentence output with pool = 'avg'
hparams = Namespace(
# LASER always used default... we need to pretend our encoder is another one
**{"encoder_model": "other", "pool": "avg"}
)
self.ranker.hparams = hparams
# AVG pooling is tested in test_utils.py
expected = average_pooling(
tokens, encoder_out["wordemb"], encoder_out["mask"], 1
)
sentemb = self.ranker.get_sentence_embedding(tokens, lengths)
self.assertTrue(torch.equal(sentemb, expected))
# Expected sentence output with pool = 'cls'
hparams = Namespace(
# LASER always used default... we need to pretend our encoder is another one
**{"encoder_model": "other", "pool": "cls"}
)
self.ranker.hparams = hparams
expected = encoder_out["wordemb"][:, 0, :]
sentemb = self.ranker.get_sentence_embedding(tokens, lengths)
self.assertTrue(torch.equal(sentemb, expected))
| [
"torch.equal",
"torch.tensor"
] | 1.6 | chryssa-zrv/UA_COMET | 527e7c86bd0a0d8ff90efda58e820108a5666b92 |
1.6 | # -*- coding: utf-8 -*-
import unittest
from argparse import Namespace
from io import StringIO
import numpy as np
import torch
from comet.models import CometEstimator
from comet.models.utils import average_pooling, max_pooling
class TestCometEstimator(unittest.TestCase):
hparams = Namespace(
**{
"encoder_model": "LASER",
"pretrained_model": None,
"nr_frozen_epochs": 0,
"loss": "mse",
# FeedForward Definition
"pool": "default",
"hidden_sizes": "1024",
"activations": "Tanh",
"dropout": 0.1,
"final_activation": False,
}
)
estimator = CometEstimator(hparams)
def test_read_csv(self):
csv_file = StringIO("src,ref,mt,score\n" "ola mundo,hi world,hey world!,0.8\n")
expected = [
{"src": "ola mundo", "ref": "hi world", "mt": "hey world!", "score": 0.8}
]
data = self.estimator.read_csv(csv_file)
self.assertListEqual(data, expected)
# Test ignore extra columns
csv_file = StringIO(
"src,ref,mt,score,id\n" "ola mundo,hi world,hey world!,0.8,10299\n"
)
data = self.estimator.read_csv(csv_file)
self.assertListEqual(data, expected)
def test_compute_metrics(self):
dummy_outputs = [
{
"val_prediction": {
"score": torch.tensor(np.array([0, 0, 0, 1, 1, 1, 1])),
},
"val_target": {
"score": torch.tensor(np.arange(7)),
},
}
]
expected = {
"pearson": torch.tensor(0.8660254, dtype=torch.float32),
"kendall": torch.tensor(0.7559289, dtype=torch.float32),
"spearman": torch.tensor(0.866025, dtype=torch.float32),
}
metrics = self.estimator.compute_metrics(dummy_outputs)
self.assertDictEqual(
{k: round(v.item(), 4) for k, v in metrics.items()},
{k: round(v.item(), 4) for k, v in expected.items()},
)
def test_prepare_sample_to_forward(self):
""" Test compatability between prepare_sample and forward functions. """
sample = [
{"src": "ola mundo", "ref": "hi world", "mt": "hey world!", "score": 0.8},
{"src": "ola mundo", "ref": "hi world", "mt": "hey world!", "score": 0.8},
]
model_input, target = self.estimator.prepare_sample(sample)
model_output = self.estimator(**model_input)
self.assertTrue(model_output["score"].shape[0] == 2)
self.assertTrue(model_output["score"].shape[1] == 1)
def test_get_sentence_embedding(self):
self.estimator.scalar_mix = None
self.estimator.layer = 0
# tokens from ["hello world", "how are your?"]
tokens = torch.tensor([[29733, 4139, 1, 1], [2231, 137, 57374, 8]])
lengths = torch.tensor([2, 4])
encoder_out = self.estimator.encoder(tokens, lengths)
# Expected sentence output with pool = 'max'
hparams = Namespace(
# LASER always used default... we need to pretend our encoder is another one
**{"encoder_model": "other", "pool": "max"}
)
self.estimator.hparams = hparams
# Max pooling is tested in test_utils.py
expected = max_pooling(tokens, encoder_out["wordemb"], 1)
sentemb = self.estimator.get_sentence_embedding(tokens, lengths)
self.assertTrue(torch.equal(sentemb, expected))
# Expected sentence output with pool = 'avg'
hparams = Namespace(
# LASER always used default... we need to pretend our encoder is another one
**{"encoder_model": "other", "pool": "avg"}
)
self.estimator.hparams = hparams
# AVG pooling is tested in test_utils.py
expected = average_pooling(
tokens, encoder_out["wordemb"], encoder_out["mask"], 1
)
sentemb = self.estimator.get_sentence_embedding(tokens, lengths)
self.assertTrue(torch.equal(sentemb, expected))
# Expected sentence output with pool = 'cls'
hparams = Namespace(
# LASER always used default... we need to pretend our encoder is another one
**{"encoder_model": "other", "pool": "cls"}
)
self.estimator.hparams = hparams
expected = encoder_out["wordemb"][:, 0, :]
sentemb = self.estimator.get_sentence_embedding(tokens, lengths)
self.assertTrue(torch.equal(sentemb, expected))
# Expected sentence output with pool = 'cls+avg'
hparams = Namespace(
# LASER always used default... we need to pretend our encoder is another one
**{"encoder_model": "other", "pool": "cls+avg"}
)
self.estimator.hparams = hparams
cls_embedding = encoder_out["wordemb"][:, 0, :]
avg_embedding = average_pooling(
tokens, encoder_out["wordemb"], encoder_out["mask"], 1
)
expected = torch.cat((cls_embedding, avg_embedding), dim=1)
sentemb = self.estimator.get_sentence_embedding(tokens, lengths)
self.assertTrue(torch.equal(sentemb, expected))
# Expected sentence output with pool = 'default'
hparams = Namespace(**{"encoder_model": "LASER", "pool": "default"})
self.estimator.hparams = hparams
expected = encoder_out["sentemb"]
sentemb = self.estimator.get_sentence_embedding(tokens, lengths)
self.assertTrue(torch.equal(sentemb, expected))
| [
"torch.cat",
"torch.tensor",
"torch.equal"
] | 1.6 | chryssa-zrv/UA_COMET | 527e7c86bd0a0d8ff90efda58e820108a5666b92 |
1.3 | #! /usr/bin/env python
# coding: utf-8
#
# Copyright (c) 2019 JR Oakes
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import config as cfg
import torch
import pandas as pd
from tqdm import tqdm
from torch import nn
import transformers
from transformers import BertTokenizer, BertModel, DistilBertTokenizer, DistilBertModel
import logging
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)
logging.getLogger("transformers.configuration_utils").setLevel(logging.WARN)
class BERT:
def __init__(self, dims = None):
if 'distilbert' in cfg.transformer_model:
self.model = DistilBertModel.from_pretrained(cfg.transformer_model)
self.tokenizer = DistilBertTokenizer.from_pretrained(cfg.transformer_model)
self.model_dims = self.model.config.dim
else:
self.model = BertModel.from_pretrained(cfg.transformer_model)
self.tokenizer = BertTokenizer.from_pretrained(cfg.transformer_model)
self.model_dims = self.model.config.hidden_size
# Make sure model is in Eval mode.
self.model.eval()
self.terms = []
self.embeddings = torch.FloatTensor([])
self.reduce = nn.Linear(self.model_dims, dims) if dims else None
self.activation = nn.Tanh()
self.sim_fn = torch.nn.CosineSimilarity(dim=-1, eps=1e-6)
def add_terms(self, texts):
for t in texts:
self.add_term(t)
def add_term(self, text):
if text not in self.terms:
emb = self.get_embedding(text)
self.terms.append(text)
self.embeddings = torch.cat((self.embeddings, emb), dim=0)
def get_embedding(self, text):
with torch.no_grad():
input_ids = torch.LongTensor(self.tokenizer.encode(text)).unsqueeze(0)
lh = self.model(input_ids)[0]
emb = torch.mean(lh, dim=1)
if self.reduce is not None:
emb = self.reduce(emb)
return self.activation(emb)
def get_most_similar(self, term):
emb = self.get_embedding(term)
comp = emb.repeat(len(self.embeddings), 1)
sim = self.sim_fn(self.embeddings, comp)
best = sim.argmax().item()
return self.terms[best], sim[best].item()
def get_similar_df(self, term):
emb = self.get_embedding(term)
comp = emb.repeat(len(self.embeddings), 1)
sim = self.sim_fn(self.embeddings, comp)
df = pd.DataFrame(columns=['terms', 'sim'])
df['terms'] = self.terms
df['sim'] = sim.tolist()
return df
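# --- Hedged usage sketch (added; not part of the original source). The terms
# are made-up examples; cfg.transformer_model must name a valid model. ---
def _example_term_similarity():
    bert = BERT(dims=128)
    bert.add_terms(['technical seo', 'site speed', 'crawl budget'])
    best_term, best_sim = bert.get_most_similar('page speed optimization')
    return bert.get_similar_df('page speed optimization'), best_term, best_sim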
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.Tanh",
"torch.FloatTensor",
"torch.no_grad",
"torch.nn.CosineSimilarity",
"torch.mean"
] | 1.3.1 | jroakes/tech-seo-crawler | c60619cb6517069665e229917cfbc4fd0614d36f |
1.4 | """DAgger (https://arxiv.org/pdf/1011.0686.pdf).
Interactively trains policy by collecting some demonstrations, doing BC, collecting more
demonstrations, doing BC again, etc. Initially the demonstrations just come from the
expert's policy; over time, they shift to be drawn more and more from the imitator's
policy.
"""
import abc
import dataclasses
import logging
import os
import pathlib
from typing import Callable, List, Mapping, Optional, Sequence, Tuple, Union
import numpy as np
import torch as th
from stable_baselines3.common import policies, utils, vec_env
from torch.utils import data as th_data
from imitation.algorithms import base, bc
from imitation.data import rollout, types
from imitation.util import logger, util
class BetaSchedule(abc.ABC):
"""Computes beta (% of time demonstration action used) from training round."""
@abc.abstractmethod
def __call__(self, round_num: int) -> float:
"""Computes the value of beta for the current round.
Args:
round_num: the current round number. Rounds are assumed to be sequentially
numbered from 0.
Returns:
The fraction of the time to sample a demonstrator action. Robot
actions will be sampled the remainder of the time.
""" # noqa: DAR202
class LinearBetaSchedule(BetaSchedule):
"""Linearly-decreasing schedule for beta."""
def __init__(self, rampdown_rounds: int):
"""Builds LinearBetaSchedule.
Args:
rampdown_rounds: number of rounds over which to anneal beta.
"""
self.rampdown_rounds = rampdown_rounds
def __call__(self, round_num: int) -> float:
"""Computes beta value.
Args:
round_num: the current round number.
Returns:
beta linearly decreasing from `1` to `0` between round `0` and
`self.rampdown_rounds`. After that, it is 0.
"""
assert round_num >= 0
return min(1, max(0, (self.rampdown_rounds - round_num) / self.rampdown_rounds))
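# --- Hedged illustration (added; not part of the original source) ---
# With rampdown_rounds=3 the schedule decays linearly and then stays at zero.
def _example_beta_schedule() -> List[float]:
    beta = LinearBetaSchedule(rampdown_rounds=3)
    return [beta(i) for i in range(5)]  # [1.0, 0.666..., 0.333..., 0.0, 0.0]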
def reconstruct_trainer(
scratch_dir: types.AnyPath,
venv: vec_env.VecEnv,
custom_logger: Optional[logger.HierarchicalLogger] = None,
device: Union[th.device, str] = "auto",
) -> "DAggerTrainer":
"""Reconstruct trainer from the latest snapshot in some working directory.
Requires vectorized environment and (optionally) a logger, as these objects
cannot be serialized.
Args:
scratch_dir: path to the working directory created by a previous run of
this algorithm. The directory should contain `checkpoint-latest.pt` and
`policy-latest.pt` files.
venv: Vectorized training environment.
custom_logger: Where to log to; if None (default), creates a new logger.
device: device on which to load the trainer.
Returns:
A deserialized `DAggerTrainer`.
"""
custom_logger = custom_logger or logger.configure()
checkpoint_path = pathlib.Path(scratch_dir, "checkpoint-latest.pt")
trainer = th.load(checkpoint_path, map_location=utils.get_device(device))
trainer.venv = venv
trainer._logger = custom_logger
return trainer
def _save_dagger_demo(
trajectory: types.Trajectory,
save_dir: types.AnyPath,
prefix: str = "",
) -> None:
# TODO(shwang): This is possibly redundant with types.save(). Note
# however that NPZ save here is likely more space efficient than
# pickle from types.save(), and types.save only accepts
# TrajectoryWithRew right now (subclass of Trajectory).
save_dir = pathlib.Path(save_dir)
assert isinstance(trajectory, types.Trajectory)
actual_prefix = f"{prefix}-" if prefix else ""
timestamp = util.make_unique_timestamp()
filename = f"{actual_prefix}dagger-demo-{timestamp}.npz"
save_dir.mkdir(parents=True, exist_ok=True)
npz_path = pathlib.Path(save_dir, filename)
np.savez_compressed(npz_path, **dataclasses.asdict(trajectory))
logging.info(f"Saved demo at '{npz_path}'")
def _load_trajectory(npz_path: str) -> types.Trajectory:
"""Load a single trajectory from a compressed Numpy file."""
np_data = np.load(npz_path, allow_pickle=True)
has_rew = "rews" in np_data
dict_data = dict(np_data.items())
# infos=None is saved as array(None) which leads to a type checking error upon
# `Trajectory` initialization. Convert to None to prevent error.
infos = dict_data["infos"]
if infos.shape == ():
assert infos.item() is None
dict_data["infos"] = None
cls = types.TrajectoryWithRew if has_rew else types.Trajectory
return cls(**dict_data)
class InteractiveTrajectoryCollector(vec_env.VecEnvWrapper):
"""DAgger VecEnvWrapper for querying and saving expert actions.
Every call to `.step(actions)` accepts and saves expert actions to `self.save_dir`,
but only forwards expert actions to the wrapped VecEnv with probability
    `self.beta`. With probability `1 - self.beta`, a "robot" action (i.e.
an action from the imitation policy) is forwarded instead.
Demonstrations are saved as `TrajectoryWithRew` to `self.save_dir` at the end
of every episode.
"""
def __init__(
self,
venv: vec_env.VecEnv,
get_robot_acts: Callable[[np.ndarray], np.ndarray],
beta: float,
save_dir: types.AnyPath,
):
"""Builds InteractiveTrajectoryCollector.
Args:
venv: vectorized environment to sample trajectories from.
get_robot_acts: get robot actions that can be substituted for
human actions. Takes a vector of observations as input & returns a
vector of actions.
beta: fraction of the time to use action given to .step() instead of
robot action. The choice of robot or human action is independently
randomized for each individual `Env` at every timestep.
save_dir: directory to save collected trajectories in.
"""
super().__init__(venv)
self.get_robot_acts = get_robot_acts
assert 0 <= beta <= 1
self.beta = beta
self.traj_accum = None
self.save_dir = save_dir
self._last_obs = None
self._done_before = True
self._is_reset = False
self._last_user_actions = None
self.rng = np.random.RandomState()
    def seed(self, seed: Optional[int] = None) -> List[Union[None, int]]:
"""Set the seed for the DAgger random number generator and wrapped VecEnv.
The DAgger RNG is used along with `self.beta` to determine whether the expert
or robot action is forwarded to the wrapped VecEnv.
Args:
seed: The random seed. May be None for completely random seeding.
Returns:
A list containing the seeds for each individual env. Note that all list
elements may be None, if the env does not return anything when seeded.
"""
self.rng = np.random.RandomState(seed=seed)
return self.venv.seed(seed)
def reset(self) -> np.ndarray:
"""Resets the environment.
Returns:
obs: first observation of a new trajectory.
"""
self.traj_accum = rollout.TrajectoryAccumulator()
obs = self.venv.reset()
for i, ob in enumerate(obs):
self.traj_accum.add_step({"obs": ob}, key=i)
self._last_obs = obs
self._is_reset = True
self._last_user_actions = None
return obs
def step_async(self, actions: np.ndarray) -> None:
"""Steps with a `1 - beta` chance of using `self.get_robot_acts` instead.
DAgger needs to be able to inject imitation policy actions randomly at some
subset of time steps. This method has a `self.beta` chance of keeping the
`actions` passed in as an argument, and a `1 - self.beta` chance of
        forwarding a "robot" (i.e. imitation policy) action generated by
        `self.get_robot_acts` instead.
At the end of every episode, a `TrajectoryWithRew` is saved to `self.save_dir`,
where every saved action is the expert action, regardless of whether the
robot action was used during that timestep.
Args:
actions: the _intended_ demonstrator/expert actions for the current
state. This will be executed with probability `self.beta`.
Otherwise, a "robot" (typically a BC policy) action will be sampled
and executed instead via `self.get_robot_act`.
"""
assert self._is_reset, "call .reset() before .step()"
# Replace each given action with a robot action 100*(1-beta)% of the time.
actual_acts = np.array(actions)
mask = self.rng.uniform(0, 1, size=(self.num_envs,)) > self.beta
if np.sum(mask) != 0:
actual_acts[mask] = self.get_robot_acts(self._last_obs[mask])
self._last_user_actions = actions
self.venv.step_async(actual_acts)
def step_wait(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray, dict]:
"""Returns observation, reward, etc after previous `step_async()` call.
Stores the transition, and saves trajectory as demo once complete.
Returns:
Observation, reward, dones (is terminal?) and info dict.
"""
next_obs, rews, dones, infos = self.venv.step_wait()
self._last_obs = next_obs
fresh_demos = self.traj_accum.add_steps_and_auto_finish(
obs=next_obs,
acts=self._last_user_actions,
rews=rews,
infos=infos,
dones=dones,
)
for traj in fresh_demos:
_save_dagger_demo(traj, self.save_dir)
return next_obs, rews, dones, infos
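# --- Hedged illustration (added; not part of the original source) ---
# The core of InteractiveTrajectoryCollector.step_async in isolation: each env
# independently keeps the expert action with probability beta and otherwise
# substitutes the imitator's ("robot") action. Robot actions are precomputed
# for all envs here only to keep the sketch short.
def _sketch_mix_actions(expert_acts: np.ndarray, robot_acts: np.ndarray,
                        beta: float, rng: np.random.RandomState) -> np.ndarray:
    actual = np.array(expert_acts)
    mask = rng.uniform(0, 1, size=len(expert_acts)) > beta
    actual[mask] = robot_acts[mask]
    return actual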
class NeedsDemosException(Exception):
"""Signals demos need to be collected for current round before continuing."""
class DAggerTrainer(base.BaseImitationAlgorithm):
"""DAgger training class with low-level API suitable for interactive human feedback.
In essence, this is just BC with some helpers for incrementally
resuming training and interpolating between demonstrator/learnt policies.
Interaction proceeds in "rounds" in which the demonstrator first provides a
fresh set of demonstrations, and then an underlying `BC` is invoked to
fine-tune the policy on the entire set of demonstrations collected in all
rounds so far. Demonstrations and policy/trainer checkpoints are stored in a
directory with the following structure::
scratch-dir-name/
checkpoint-001.pkl
checkpoint-002.pkl
…
checkpoint-XYZ.pkl
checkpoint-latest.pkl
demos/
round-000/
demos_round_000_000.npz
demos_round_000_001.npz
…
round-001/
demos_round_001_000.npz
…
…
round-XYZ/
…
"""
DEFAULT_N_EPOCHS: int = 4
"""The default number of BC training epochs in `extend_and_update`."""
def __init__(
self,
*,
venv: vec_env.VecEnv,
scratch_dir: types.AnyPath,
beta_schedule: Callable[[int], float] = None,
bc_trainer: bc.BC,
custom_logger: Optional[logger.HierarchicalLogger] = None,
):
"""Builds DAggerTrainer.
Args:
venv: Vectorized training environment.
scratch_dir: Directory to use to store intermediate training
information (e.g. for resuming training).
beta_schedule: Provides a value of `beta` (the probability of taking
expert action in any given state) at each round of training. If
`None`, then `linear_beta_schedule` will be used instead.
bc_trainer: A `BC` instance used to train the underlying policy.
custom_logger: Where to log to; if None (default), creates a new logger.
"""
super().__init__(custom_logger=custom_logger)
if beta_schedule is None:
beta_schedule = LinearBetaSchedule(15)
self.beta_schedule = beta_schedule
self.scratch_dir = pathlib.Path(scratch_dir)
self.venv = venv
self.round_num = 0
self._last_loaded_round = -1
self._all_demos = []
utils.check_for_correct_spaces(
self.venv,
bc_trainer.observation_space,
bc_trainer.action_space,
)
self.bc_trainer = bc_trainer
self.bc_trainer.logger = custom_logger
def __getstate__(self):
"""Return state excluding non-pickleable objects."""
d = dict(self.__dict__)
del d["venv"]
del d["_logger"]
return d
@base.BaseImitationAlgorithm.logger.setter
def logger(self, value: logger.HierarchicalLogger) -> None:
# DAgger and inner-BC logger should stay in sync
self._logger = value
self.bc_trainer.logger = value
@property
def policy(self) -> policies.BasePolicy:
return self.bc_trainer.policy
@property
def batch_size(self) -> int:
return self.bc_trainer.batch_size
def _load_all_demos(self):
num_demos_by_round = []
for round_num in range(self._last_loaded_round + 1, self.round_num + 1):
round_dir = self._demo_dir_path_for_round(round_num)
demo_paths = self._get_demo_paths(round_dir)
self._all_demos.extend(_load_trajectory(p) for p in demo_paths)
num_demos_by_round.append(len(demo_paths))
logging.info(f"Loaded {len(self._all_demos)} total")
demo_transitions = rollout.flatten_trajectories(self._all_demos)
return demo_transitions, num_demos_by_round
def _get_demo_paths(self, round_dir):
return [
os.path.join(round_dir, p)
for p in os.listdir(round_dir)
if p.endswith(".npz")
]
def _demo_dir_path_for_round(self, round_num: Optional[int] = None) -> pathlib.Path:
if round_num is None:
round_num = self.round_num
return self.scratch_dir / "demos" / f"round-{round_num:03d}"
def _try_load_demos(self) -> None:
"""Load the dataset for this round into self.bc_trainer as a DataLoader."""
demo_dir = self._demo_dir_path_for_round()
demo_paths = self._get_demo_paths(demo_dir) if os.path.isdir(demo_dir) else []
if len(demo_paths) == 0:
raise NeedsDemosException(
f"No demos found for round {self.round_num} in dir '{demo_dir}'. "
f"Maybe you need to collect some demos? See "
f".get_trajectory_collector()",
)
if self._last_loaded_round < self.round_num:
transitions, num_demos = self._load_all_demos()
logging.info(
f"Loaded {sum(num_demos)} new demos from {len(num_demos)} rounds",
)
if len(transitions) < self.batch_size:
raise ValueError(
"Not enough transitions to form a single batch: "
f"self.batch_size={self.batch_size} > "
f"len(transitions)={len(transitions)}",
)
data_loader = th_data.DataLoader(
transitions,
self.batch_size,
drop_last=True,
shuffle=True,
collate_fn=types.transitions_collate_fn,
)
self.bc_trainer.set_demonstrations(data_loader)
self._last_loaded_round = self.round_num
def extend_and_update(self, bc_train_kwargs: Optional[Mapping] = None) -> int:
"""Extend internal batch of data and train BC.
Specifically, this method will load new transitions (if necessary), train
the model for a while, and advance the round counter. If there are no fresh
demonstrations in the demonstration directory for the current round, then
this will raise a `NeedsDemosException` instead of training or advancing
the round counter. In that case, the user should call
`.get_trajectory_collector()` and use the returned
`InteractiveTrajectoryCollector` to produce a new set of demonstrations for
the current interaction round.
Arguments:
bc_train_kwargs: Keyword arguments for calling `BC.train()`. If
the `log_rollouts_venv` key is not provided, then it is set to
`self.venv` by default. If neither of the `n_epochs` and `n_batches`
keys are provided, then `n_epochs` is set to `self.DEFAULT_N_EPOCHS`.
Returns:
New round number after advancing the round counter.
"""
if bc_train_kwargs is None:
bc_train_kwargs = {}
else:
bc_train_kwargs = dict(bc_train_kwargs)
user_keys = bc_train_kwargs.keys()
if "log_rollouts_venv" not in user_keys:
bc_train_kwargs["log_rollouts_venv"] = self.venv
if "n_epochs" not in user_keys and "n_batches" not in user_keys:
bc_train_kwargs["n_epochs"] = self.DEFAULT_N_EPOCHS
logging.info("Loading demonstrations")
self._try_load_demos()
logging.info(f"Training at round {self.round_num}")
self.bc_trainer.train(**bc_train_kwargs)
self.round_num += 1
logging.info(f"New round number is {self.round_num}")
return self.round_num
def get_trajectory_collector(self) -> InteractiveTrajectoryCollector:
"""Create trajectory collector to extend current round's demonstration set.
Returns:
A collector configured with the appropriate beta, imitator policy, etc.
for the current round. Refer to the documentation for
`InteractiveTrajectoryCollector` to see how to use this.
"""
save_dir = self._demo_dir_path_for_round()
beta = self.beta_schedule(self.round_num)
collector = InteractiveTrajectoryCollector(
venv=self.venv,
get_robot_acts=lambda acts: self.bc_trainer.policy.predict(acts)[0],
beta=beta,
save_dir=save_dir,
)
return collector
def save_trainer(self) -> Tuple[pathlib.Path, pathlib.Path]:
"""Create a snapshot of trainer in the scratch/working directory.
The created snapshot can be reloaded with `reconstruct_trainer()`.
In addition to saving one copy of the policy in the trainer snapshot, this
method saves a second copy of the policy in its own file. Having a second copy
of the policy is convenient because it can be loaded on its own and passed to
evaluation routines for other algorithms.
Returns:
checkpoint_path: a path to one of the created `DAggerTrainer` checkpoints.
policy_path: a path to one of the created `DAggerTrainer` policies.
"""
self.scratch_dir.mkdir(parents=True, exist_ok=True)
# save full trainer checkpoints
checkpoint_paths = [
self.scratch_dir / f"checkpoint-{self.round_num:03d}.pt",
self.scratch_dir / "checkpoint-latest.pt",
]
for checkpoint_path in checkpoint_paths:
th.save(self, checkpoint_path)
# save policies separately for convenience
policy_paths = [
self.scratch_dir / f"policy-{self.round_num:03d}.pt",
self.scratch_dir / "policy-latest.pt",
]
for policy_path in policy_paths:
self.save_policy(policy_path)
return checkpoint_paths[0], policy_paths[0]
def save_policy(self, policy_path: types.AnyPath) -> None:
"""Save the current policy only (and not the rest of the trainer).
Args:
policy_path: path to save policy to.
"""
self.bc_trainer.save_policy(policy_path)
class SimpleDAggerTrainer(DAggerTrainer):
"""Simpler subclass of DAggerTrainer for training with synthetic feedback."""
def __init__(
self,
*,
venv: vec_env.VecEnv,
scratch_dir: types.AnyPath,
expert_policy: policies.BasePolicy,
expert_trajs: Optional[Sequence[types.Trajectory]] = None,
**dagger_trainer_kwargs,
):
"""Builds SimpleDAggerTrainer.
Args:
venv: Vectorized training environment. Note that when the robot
action is randomly injected (in accordance with `beta_schedule`
argument), every individual environment will get a robot action
simultaneously for that timestep.
scratch_dir: Directory to use to store intermediate training
information (e.g. for resuming training).
expert_policy: The expert policy used to generate synthetic demonstrations.
expert_trajs: Optional starting dataset that is inserted into the round 0
dataset.
dagger_trainer_kwargs: Other keyword arguments passed to the
superclass initializer `DAggerTrainer.__init__`.
Raises:
ValueError: The observation or action space does not match between
`venv` and `expert_policy`.
"""
super().__init__(venv=venv, scratch_dir=scratch_dir, **dagger_trainer_kwargs)
self.expert_policy = expert_policy
if expert_policy.observation_space != self.venv.observation_space:
raise ValueError(
"Mismatched observation space between expert_policy and venv",
)
if expert_policy.action_space != self.venv.action_space:
raise ValueError("Mismatched action space between expert_policy and venv")
# TODO(shwang):
# Might welcome Transitions and DataLoaders as sources of expert data
# in the future too, but this will require some refactoring, so for
# now we just have `expert_trajs`.
if expert_trajs is not None:
# Save each initial expert trajectory into the "round 0" demonstration
# data directory.
for traj in expert_trajs:
_save_dagger_demo(
traj,
self._demo_dir_path_for_round(),
prefix="initial_data",
)
def train(
self,
total_timesteps: int,
*,
rollout_round_min_episodes: int = 3,
rollout_round_min_timesteps: int = 500,
bc_train_kwargs: Optional[dict] = None,
) -> None:
"""Train the DAgger agent.
The agent is trained in "rounds" where each round consists of a dataset
aggregation step followed by a BC update step.
During a dataset aggregation step, `self.expert_policy` is used to perform
rollouts in the environment but there is a `1 - beta` chance (beta is
determined from the round number and `self.beta_schedule`) that the DAgger
agent's action is used instead. Regardless of whether the DAgger agent's action
is used during the rollout, the expert action and corresponding observation are
always appended to the dataset. The number of environment steps in the
dataset aggregation stage is determined by the `rollout_round_min*` arguments.
During a BC update step, `BC.train()` is called to update the DAgger agent on
all data collected so far.
Args:
total_timesteps: The number of timesteps to train inside the environment.
In practice this is a lower bound, because the number of timesteps is
rounded up to finish the minimum number of episodes or timesteps in the
last DAgger training round, and the environment timesteps are executed
in multiples of `self.venv.num_envs`.
rollout_round_min_episodes: The number of episodes that must be
completed before a dataset aggregation step ends.
rollout_round_min_timesteps: The number of environment timesteps that must
be completed before a dataset aggregation step ends. Note that any
round will always train for at least `self.batch_size` timesteps,
because otherwise BC could fail to receive any batches.
bc_train_kwargs: Keyword arguments for calling `BC.train()`. If
the `log_rollouts_venv` key is not provided, then it is set to
`self.venv` by default. If neither of the `n_epochs` and `n_batches`
keys are provided, then `n_epochs` is set to `self.DEFAULT_N_EPOCHS`.
"""
total_timestep_count = 0
round_num = 0
while total_timestep_count < total_timesteps:
collector = self.get_trajectory_collector()
round_episode_count = 0
round_timestep_count = 0
sample_until = rollout.make_sample_until(
min_timesteps=max(rollout_round_min_timesteps, self.batch_size),
min_episodes=rollout_round_min_episodes,
)
trajectories = rollout.generate_trajectories(
policy=self.expert_policy,
venv=collector,
sample_until=sample_until,
deterministic_policy=True,
rng=collector.rng,
)
for traj in trajectories:
_save_dagger_demo(traj, collector.save_dir)
self._logger.record_mean(
"dagger/mean_episode_reward",
np.sum(traj.rews),
)
round_timestep_count += len(traj)
total_timestep_count += len(traj)
round_episode_count += len(trajectories)
self._logger.record("dagger/total_timesteps", total_timestep_count)
self._logger.record("dagger/round_num", round_num)
self._logger.record("dagger/round_episode_count", round_episode_count)
self._logger.record("dagger/round_timestep_count", round_timestep_count)
# `logger.dump` is called inside BC.train within the following fn call:
self.extend_and_update(bc_train_kwargs)
round_num += 1
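# --- Hypothetical usage sketch (not part of the original file) ---
# Minimal end-to-end call. `bc_trainer` is assumed to be a keyword accepted by
# `DAggerTrainer.__init__` (not shown above), and `expert` a trained
# stable-baselines3 policy for the same observation/action spaces as `venv`.
def _example_simple_dagger(venv, expert, bc_trainer, scratch_dir="dagger_scratch"):
    """Runs DAgger with synthetic expert feedback for roughly 4000 env steps."""
    trainer = SimpleDAggerTrainer(
        venv=venv,
        scratch_dir=scratch_dir,
        expert_policy=expert,
        bc_trainer=bc_trainer,  # assumed superclass keyword
    )
    trainer.train(total_timesteps=4000, rollout_round_min_timesteps=500)
    return trainer.save_trainer()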
| [
"torch.save",
"torch.utils.data.DataLoader"
] | 1.4.0 | jadecastro/imitation | e05d7f5a4adfb021699647b80576c74ba6bd9443 |
0.0 | import os
os.environ['PYOPENGL_PLATFORM'] = 'osmesa'
import glob
import numpy as np
import torch
import yaml
import trimesh
import numbers
import pickle as pkl
from torch.utils import data
from scipy.spatial import cKDTree as KDTree
from scipy.spatial.transform import Rotation as R
# from human_body_prior.mesh import MeshViewer
from im2mesh.utils.libmesh import check_mesh_contains
SMPL2IPNET_IDX = np.array([11, 12, 13, 11, 3, 8, 11, 1, 6, 11, 1, 6, 0, 11, 11, 0, 5, 10, 4, 9, 2, 7, 2, 7])
IPNET2SMPL_IDX = np.array([12, 7, 20, 4, 18, 16, 8, 21, 5, 19, 17, 0, 1, 2])
SMPL_parents = np.array([-1, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 9, 12, 13, 14,
16, 17, 18, 19, 20, 21], dtype=np.int32)
IPNet_parents = np.array([11, 3, 4, 12, 5, 11, 8, 9, 13, 10, 11, -1, 11, 11], dtype=np.int32)
IPNet_parents_in_SMPL = np.array([9, 4, 18, 1, 16, 13, 5, 19, 2, 17, 14, -1, 0, 0], dtype=np.int32)
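# --- Illustrative sketch (not part of the original file) ---
# The lookup tables above convert part labels between the 24-joint SMPL
# convention and the 14-part IPNet convention via fancy indexing, as done
# later in __getitem__ (e.g. SMPL2IPNET_IDX[skinning_inds]).
def _example_part_label_conversion():
    smpl_labels = np.array([0, 4, 20, 23])       # hypothetical per-point SMPL joint ids
    ipnet_labels = SMPL2IPNET_IDX[smpl_labels]   # -> array([11, 3, 2, 7])
    back_to_smpl = IPNET2SMPL_IDX[ipnet_labels]  # representative SMPL joint for each part
    return ipnet_labels, back_to_smpl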
''' Copied from IPNet'''
def get_3DSV(mesh):
from opendr.camera import ProjectPoints
from opendr.renderer import DepthRenderer
WIDTH, HEIGHT = 250, 250
rt = R.from_euler('xyz', [np.pi, 0, 0]).as_rotvec()
rt_mat = R.from_euler('xyz', [np.pi, 0, 0]).as_matrix()
camera = ProjectPoints(v=mesh.vertices, f=np.array([WIDTH, WIDTH]), c=np.array([WIDTH, HEIGHT]) / 2.,
t=np.array([0, 0, 3.0]), rt=rt, k=np.zeros(5))
frustum = {'near': 1., 'far': 10., 'width': WIDTH, 'height': HEIGHT}
rn = DepthRenderer(camera=camera, frustum=frustum, f=mesh.faces, overdraw=False)
# import cv2
depth_image = rn.depth_image.copy()
mask = depth_image < depth_image.max() - 0.01
depth_image[~mask] = 0
depth_image[mask] = 255 - (depth_image[mask] - depth_image[mask].min()) / (depth_image[mask].max() - depth_image[mask].min()) * 255
points3d = camera.unproject_depth_image(rn.r)
mask = points3d[:, :, 2] > np.min(points3d[:, :, 2]) + 0.01
points3d = points3d[mask]
return points3d, depth_image
class CAPESingleViewDataset(data.Dataset):
''' CAPE dataset class.
'''
def __init__(self, dataset_folder,
subjects=['00032', '00096', '00122', '00127', '00134', '00145', '00159', '00215', '02474', '03223', '03284', '03331', '03375', '03383', '03394'],
mode='train',
input_type='pointcloud',
voxel_res=128,
double_layer=False,
use_aug=False,
use_v_template=False,
use_abs_bone_transforms=False,
num_joints=24,
input_pointcloud_n=5000,
input_pointcloud_noise=0.001,
points_size=2048,
points_uniform_ratio=0.5,
use_global_trans=False,
normalized_scale=False,
query_on_clothed=False,
points_sigma=0.05,
sequence_idx=None,
subject_idx=None):
''' Initialization of the 3D shape dataset.
Args:
dataset_folder (str): dataset folder
'''
# Attributes
self.cape_path = '/cluster/home/shawang/Datasets/CAPE'
self.dataset_folder = dataset_folder
self.subjects = subjects
self.use_global_trans = use_global_trans
self.use_aug = use_aug
self.mode = mode
self.normalized_scale = normalized_scale
self.input_type = input_type
self.voxel_res = voxel_res
self.double_layer = double_layer
self.num_joints = num_joints
self.query_on_clothed = query_on_clothed
self.use_abs_bone_transforms = use_abs_bone_transforms
# if normalized_scale:
# assert ( not use_global_trans )
self.points_size = points_size if self.mode in ['train', 'test'] else 100000
# self.points_padding = 0.1
self.points_padding = 1 / 3 # For IPNet, mesh is normalized to [-0.75, 0.75] while sampling space is [-1, 1]
self.points_uniform_ratio = points_uniform_ratio if self.mode in ['train', 'test'] else 0.5
self.points_sigma = points_sigma # 5cm standard deviation for surface points
if input_type == 'pointcloud':
self.input_pointcloud_n = input_pointcloud_n
self.input_pointcloud_noise = input_pointcloud_noise
else:
self.input_pointcloud_n = self.input_pointcloud_noise = 0
self.faces = np.load('body_models/misc/faces.npz')['faces']
self.skinning_weights = dict(np.load('body_models/misc/skinning_weights_all.npz'))
self.posedirs = dict(np.load('body_models/misc/posedirs_all.npz'))
if self.use_abs_bone_transforms:
self.J_regressors = dict(np.load('body_models/misc/J_regressors.npz'))
with open('body_models/misc/smpl_parts_dense.pkl', 'rb') as f:
part_labels = pkl.load(f)
labels = np.zeros(6890, dtype=np.int32)
for idx, k in enumerate(part_labels):
labels[part_labels[k]] = idx
self.part_labels = labels
self.use_v_template = use_v_template
if use_v_template:
self.v_templates = dict(np.load('body_models/misc/v_templates.npz'))
# Get all data
self.data = []
if subject_idx is not None:
subjects = [subjects[subject_idx]]
with open(os.path.join(self.cape_path, 'cape_release/misc/subj_genders.pkl'), 'rb') as f:
genders = pkl.load(f)
for subject in subjects:
subject_dir = os.path.join(dataset_folder, subject)
sequence_dirs = glob.glob(os.path.join(subject_dir, '*'))
sequences = set()
for sequence_dir in sequence_dirs:
sequences.add(os.path.basename(sequence_dir).split('.')[0])
sequences = sorted(list(sequences))
if sequence_idx is not None:
sequences = [sequences[sequence_idx]]
for sequence in sequences:
points_dir = os.path.join(subject_dir, sequence)
points_files = sorted(glob.glob(os.path.join(points_dir, '*.npz')))
self.data += [
{'subset': 'cape',
'subject': subject,
'gender': genders[subject],
'sequence': sequence,
'data_path': points_file}
for points_file in points_files
]
# self.data = [self.data[200]]
def augm_params(self):
"""Get augmentation parameters."""
if self.mode == 'train' and self.use_aug:
# Each rotation angle is drawn from N(0, 90) degrees and clamped to [-2*90, 2*90]
# Roll
rot_x = min(2*90,
max(-2*90, np.random.randn()*90))
sn, cs = np.sin(np.pi / 180 * rot_x), np.cos(np.pi / 180 * rot_x)
rot_x = np.eye(4)
rot_x[1, 1] = cs
rot_x[1, 2] = -sn
rot_x[2, 1] = sn
rot_x[2, 2] = cs
rot_y = min(2*90,
max(-2*90, np.random.randn()*90))
# Pitch
sn, cs = np.sin(np.pi / 180 * rot_y), np.cos(np.pi / 180 * rot_y)
rot_y = np.eye(4)
rot_y[0, 0] = cs
rot_y[0, 2] = sn
rot_y[2, 0] = -sn
rot_y[2, 2] = cs
rot_z = min(2*90,
max(-2*90, np.random.randn()*90))
# Yaw
sn, cs = np.sin(np.pi / 180 * rot_z), np.cos(np.pi / 180 * rot_z)
rot_z = np.eye(4)
rot_z[0, 0] = cs
rot_z[0, 1] = -sn
rot_z[1, 0] = sn
rot_z[1, 1] = cs
rot_mat = np.dot(rot_x, np.dot(rot_y, rot_z))
# but it is identity with probability 3/5
if np.random.uniform() <= 0.6:
rot_mat = np.eye(4)
else:
rot_mat = np.eye(4)
return rot_mat
def __len__(self):
''' Returns the length of the dataset.
'''
return len(self.data)
def __getitem__(self, idx):
''' Returns an item of the dataset.
Args:
idx (int): ID of data point
'''
data_path = self.data[idx]['data_path']
subject = self.data[idx]['subject']
gender = self.data[idx]['gender']
data = {}
aug_rot = self.augm_params().astype(np.float32)
points_dict = np.load(data_path)
# 3D models and points
loc = points_dict['loc'].astype(np.float32)
trans = points_dict['trans'].astype(np.float32)
root_loc = points_dict['Jtr'][0].astype(np.float32)
scale = points_dict['scale'].astype(np.float32)
# Also get GT SMPL poses
pose_body = points_dict['pose_body']
pose_hand = points_dict['pose_hand']
pose = np.concatenate([pose_body, pose_hand], axis=-1)
pose = R.from_rotvec(pose.reshape([-1, 3]))
body_mesh_a_pose = points_dict['a_pose_mesh_points']
# Break symmetry if given in float16:
if body_mesh_a_pose.dtype == np.float16:
body_mesh_a_pose = body_mesh_a_pose.astype(np.float32)
body_mesh_a_pose += 1e-4 * np.random.randn(*body_mesh_a_pose.shape)
else:
body_mesh_a_pose = body_mesh_a_pose.astype(np.float32)
n_smpl_points = body_mesh_a_pose.shape[0]
bone_transforms = points_dict['bone_transforms'].astype(np.float32)
# Apply rotation augmentation to bone transformations
bone_transforms_aug = np.matmul(np.expand_dims(aug_rot, axis=0), bone_transforms)
bone_transforms_aug[:, :3, -1] += root_loc - trans - np.dot(aug_rot[:3, :3], root_loc - trans)
bone_transforms = bone_transforms_aug
# Get augmented posed-mesh
skinning_weights = self.skinning_weights[gender]
if self.use_abs_bone_transforms:
J_regressor = self.J_regressors[gender]
T = np.dot(skinning_weights, bone_transforms.reshape([-1, 16])).reshape([-1, 4, 4])
homogen_coord = np.ones([n_smpl_points, 1], dtype=np.float32)
a_pose_homo = np.concatenate([body_mesh_a_pose - trans, homogen_coord], axis=-1).reshape([n_smpl_points, 4, 1])
body_mesh = np.matmul(T, a_pose_homo)[:, :3, 0].astype(np.float32) + trans
posed_trimesh = trimesh.Trimesh(vertices=body_mesh, faces=self.faces)
input_pointcloud, _ = get_3DSV(posed_trimesh)
noise = self.input_pointcloud_noise * np.random.randn(*input_pointcloud.shape)
input_pointcloud = (input_pointcloud + noise).astype(np.float32)
# Get extents of model.
bb_min = np.min(input_pointcloud, axis=0)
bb_max = np.max(input_pointcloud, axis=0)
# total_size = np.sqrt(np.square(bb_max - bb_min).sum())
total_size = (bb_max - bb_min).max()
# Scales all dimensions equally.
scale = max(1.6, total_size) # 1.6 is the magic number from IPNet
loc = np.array(
[(bb_min[0] + bb_max[0]) / 2,
(bb_min[1] + bb_max[1]) / 2,
(bb_min[2] + bb_max[2]) / 2],
dtype=np.float32
)
if self.input_pointcloud_n <= input_pointcloud.shape[0]:
rand_inds = np.random.choice(input_pointcloud.shape[0], size=self.input_pointcloud_n, replace=False)
else:
rand_inds = np.random.choice(input_pointcloud.shape[0], size=self.input_pointcloud_n, replace=True)
input_pointcloud = input_pointcloud[rand_inds, :]
n_points_uniform = int(self.points_size * self.points_uniform_ratio)
n_points_surface = self.points_size - n_points_uniform
boxsize = 1 + self.points_padding
points_uniform = np.random.rand(n_points_uniform, 3)
points_uniform = boxsize * (points_uniform - 0.5)
# Scale points in (padded) unit box back to the original space
points_uniform *= scale
points_uniform += loc
# Sample points around posed-mesh surface
n_points_surface_cloth = n_points_surface // 2 if self.double_layer else n_points_surface
points_surface = posed_trimesh.sample(n_points_surface_cloth)
points_surface += np.random.normal(scale=self.points_sigma, size=points_surface.shape)
if self.double_layer:
n_points_surface_minimal = n_points_surface // 2
posedir = self.posedirs[gender]
minimal_shape_path = os.path.join(self.cape_path, 'cape_release', 'minimal_body_shape', subject, subject + '_minimal.npy')
minimal_shape = np.load(minimal_shape_path)
pose_mat = pose.as_matrix()
ident = np.eye(3)
pose_feature = (pose_mat - ident).reshape([207, 1])
pose_offsets = np.dot(posedir.reshape([-1, 207]), pose_feature).reshape([6890, 3])
minimal_shape += pose_offsets
if self.use_abs_bone_transforms:
Jtr_cano = np.dot(J_regressor, minimal_shape)
Jtr_cano = Jtr_cano[IPNET2SMPL_IDX, :]
a_pose_homo = np.concatenate([minimal_shape, homogen_coord], axis=-1).reshape([n_smpl_points, 4, 1])
minimal_body_mesh = np.matmul(T, a_pose_homo)[:, :3, 0].astype(np.float32) + trans
minimal_posed_trimesh = trimesh.Trimesh(vertices=minimal_body_mesh, faces=self.faces)
# Sample points around minimally clothed posed-mesh surface
points_surface_minimal = minimal_posed_trimesh.sample(n_points_surface_minimal)
points_surface_minimal += np.random.normal(scale=self.points_sigma, size=points_surface_minimal.shape)
points_surface = np.vstack([points_surface, points_surface_minimal])
# Check occupancy values for sampled points
query_points = np.vstack([points_uniform, points_surface]).astype(np.float32)
if self.double_layer:
# Double-layer occupancies, as was done in IPNet
# 0: outside, 1: between body and cloth, 2: inside body mesh
occupancies_cloth = check_mesh_contains(posed_trimesh, query_points)
occupancies_minimal = check_mesh_contains(minimal_posed_trimesh, query_points)
occupancies = occupancies_cloth.astype(np.int64)
occupancies[occupancies_minimal] = 2
else:
occupancies = check_mesh_contains(posed_trimesh, query_points).astype(np.float32)
# Skinning indices by querying the nearest SMPL vertex on the clothed mesh
kdtree = KDTree(body_mesh if self.query_on_clothed else minimal_body_mesh)
_, p_idx = kdtree.query(query_points)
pts_W = skinning_weights[p_idx, :]
skinning_inds_ipnet = self.part_labels[p_idx] # skinning inds (14 parts)
skinning_inds_smpl = pts_W.argmax(1) # full skinning inds (24 parts)
if self.num_joints == 14:
skinning_inds = skinning_inds_ipnet
else:
skinning_inds = skinning_inds_smpl
# Invert LBS to get query points in A-pose space
T = np.dot(pts_W, bone_transforms.reshape([-1, 16])).reshape([-1, 4, 4])
T = np.linalg.inv(T)
homogen_coord = np.ones([self.points_size, 1], dtype=np.float32)
posed_homo = np.concatenate([query_points - trans, homogen_coord], axis=-1).reshape([self.points_size, 4, 1])
query_points_a_pose = np.matmul(T, posed_homo)[:, :3, 0].astype(np.float32) + trans
if self.use_abs_bone_transforms:
assert (not self.use_v_template and self.num_joints == 24)
query_points_a_pose -= Jtr_cano[SMPL2IPNET_IDX[skinning_inds], :]
if self.use_v_template:
v_template = self.v_templates[gender]
pose_shape_offsets = v_template - minimal_shape
query_points_template = query_points_a_pose + pose_shape_offsets[p_idx, :]
sc_factor = 1.0 / scale * 1.5 if self.normalized_scale else 1.0 # 1.5 is the magic number from IPNet
offset = loc
bone_transforms_inv = bone_transforms.copy()
bone_transforms_inv[:, :3, -1] += trans - loc
bone_transforms_inv = np.linalg.inv(bone_transforms_inv)
bone_transforms_inv[:, :3, -1] *= sc_factor
data = {
None: (query_points - offset) * sc_factor,
'occ': occupancies,
'trans': trans,
'root_loc': root_loc,
'pts_a_pose': (query_points_a_pose - (trans if self.use_global_trans else offset)) * sc_factor,
'skinning_inds': skinning_inds,
'skinning_inds_ipnet': skinning_inds_ipnet,
'skinning_inds_smpl': skinning_inds_smpl,
'loc': loc,
'scale': scale,
'bone_transforms': bone_transforms,
'bone_transforms_inv': bone_transforms_inv,
}
if self.use_v_template:
data.update({'pts_template': (query_points_template - (trans if self.use_global_trans else offset)) * sc_factor})
if self.mode in ['test']:
data.update({'smpl_vertices': body_mesh, 'smpl_a_pose_vertices': body_mesh_a_pose})
if self.double_layer:
data.update({'minimal_smpl_vertices': minimal_body_mesh})
data_out = {}
field_name = 'points' if self.mode in ['train', 'test'] else 'points_iou'
for k, v in data.items():
if k is None:
data_out[field_name] = v
else:
data_out['%s.%s' % (field_name, k)] = v
if self.input_type == 'pointcloud':
data_out.update(
{'inputs': (input_pointcloud - offset) * sc_factor,
'idx': idx,
}
)
elif self.input_type == 'voxel':
voxels = np.unpackbits(points_dict['voxels_occ']).astype(np.float32)
voxels = np.reshape(voxels, [self.voxel_res] * 3)
data_out.update(
{'inputs': voxels,
'idx': idx,
}
)
else:
raise ValueError('Unsupported input type: {}'.format(self.input_type))
return data_out
def get_model_dict(self, idx):
return self.data[idx]
| [
"torch.utils.data.items",
"torch.utils.data.update"
] | 0.0.8 | taconite/PTF | a8789c9f752aea2944c2a75e04cc2aa21c7e4a00 |
1.0 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors, The HuggingFace Inc. team and University of Washington.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import json
import logging
import random
import h5py
import six
import torch
from tqdm import tqdm
import copy
import numpy as np
import tokenization
from post import _improve_answer_span, _check_is_max_context
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
class SquadExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self,
qas_id=None,
question_text=None,
doc_tokens=None,
orig_answer_text=None,
start_position=None,
end_position=None,
title="",
doc_idx=0,
pid=0):
self.qas_id = qas_id
self.question_text = question_text
self.doc_tokens = doc_tokens
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
self.title = title
self.doc_idx = doc_idx
self.pid = pid
def __str__(self):
return self.__repr__()
def __repr__(self):
s = ""
s += "qas_id: %s" % (tokenization.printable_text(self.qas_id))
s += ", question_text: %s" % (
tokenization.printable_text(self.question_text))
s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
if self.start_position:
s += ", start_position: %d" % (self.start_position)
if self.start_position:
s += ", end_position: %d" % (self.end_position)
return s
class ContextFeatures(object):
"""A single set of features of data."""
def __init__(self,
unique_id,
example_index,
doc_span_index,
tokens,
token_to_orig_map,
token_is_max_context,
input_ids,
input_mask,
start_position=None,
end_position=None):
self.unique_id = unique_id
self.example_index = example_index
self.doc_span_index = doc_span_index
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.input_mask = input_mask
self.start_position = start_position
self.end_position = end_position
class QuestionFeatures(object):
def __init__(self,
unique_id,
example_index,
input_ids,
input_mask,
tokens):
self.unique_id = unique_id
self.example_index = example_index
self.input_ids = input_ids
self.input_mask = input_mask
self.tokens = tokens
def read_squad_examples(input_file, is_training, context_only=False, question_only=False,
draft=False, draft_num_examples=12, tokenizer=None):
"""Read a SQuAD json file into a list of SquadExample."""
print("reading", input_file)
with open(input_file, "r") as reader:
input_data = json.load(reader)["data"]
examples = []
for doc_idx, entry in enumerate(input_data):
title = entry['title']
for pid, paragraph in enumerate(entry["paragraphs"]):
if not question_only:
paragraph_text = paragraph["context"]
doc_tokens, char_to_word_offset = context_to_tokens_and_offset(paragraph_text, tokenizer=tokenizer)
if context_only:
example = SquadExample(
doc_tokens=doc_tokens,
title=title,
doc_idx=doc_idx,
pid=pid)
examples.append(example)
if draft and len(examples) == draft_num_examples:
return examples
continue
else:
for qa in paragraph["qas"]:
qas_id = qa["id"]
question_text = qa["question"]
start_position = None
end_position = None
orig_answer_text = None
if is_training:
if False: # len(qa["answers"]) > 1:
raise ValueError(
"For training, each question should have exactly 1 answer.")
elif len(qa["answers"]) == 0:
orig_answer_text = ""
start_position = -1
end_position = -1
else:
answer = qa["answers"][0]
orig_answer_text = answer["text"]
answer_offset = answer["answer_start"]
answer_length = len(orig_answer_text)
start_position = char_to_word_offset[answer_offset]
end_position = char_to_word_offset[answer_offset + answer_length - 1]
# Only add answers where the text can be exactly recovered from the
# document. If this CAN'T happen it's likely due to weird Unicode
# stuff so we will just skip the example.
#
# Note that this means for training mode, every example is NOT
# guaranteed to be preserved.
actual_text = " ".join(doc_tokens[start_position:(end_position + 1)])
cleaned_answer_text = " ".join(
tokenization.whitespace_tokenize(orig_answer_text))
if actual_text.find(cleaned_answer_text) == -1:
logger.warning("Could not find answer: '%s' vs. '%s'",
actual_text, cleaned_answer_text)
continue
if question_only:
example = SquadExample(
qas_id=qas_id,
question_text=question_text)
else:
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
doc_tokens=doc_tokens,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
title=title,
pid=pid)
examples.append(example)
if draft and len(examples) == draft_num_examples:
return examples
return examples
# This is for training and direct evaluation (slow eval)
def convert_examples_to_features(examples, tokenizer, max_seq_length,
doc_stride, max_query_length, is_training):
"""Loads a data file into a list of `InputBatch`s."""
unique_id = 1000000000
features = []
question_features = []
for (example_index, example) in enumerate(tqdm(examples, desc='converting')):
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
if is_training:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
example.orig_answer_text)
# The -2 accounts for [CLS] and [SEP]
max_tokens_for_doc = max_seq_length - 2
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
tokens_ = []
token_to_orig_map = {}
token_is_max_context = {}
tokens.append("[CLS]")
tokens_.append("[CLS]")
for token in query_tokens:
tokens_.append(token)
tokens_.append("[SEP]")
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
tokens.append("[SEP]")
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_ids_ = tokenizer.convert_tokens_to_ids(tokens_)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
input_mask_ = [1] * len(input_ids_)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
while len(input_ids_) < max_query_length + 2:
input_ids_.append(0)
input_mask_.append(0)
assert len(input_ids_) == max_query_length + 2
assert len(input_mask_) == max_query_length + 2
start_position = None
end_position = None
if example.start_position is not None and example.start_position < 0:
start_position, end_position = -1, -1
elif is_training:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
if (example.start_position < doc_start or
example.end_position < doc_start or
example.start_position > doc_end or example.end_position > doc_end):
continue
doc_offset = 1
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if example_index < 20:
logger.info("*** Example ***")
logger.info("unique_id: %s" % (unique_id))
logger.info("example_index: %s" % (example_index))
logger.info("doc_span_index: %s" % (doc_span_index))
logger.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
logger.info("token_to_orig_map: %s" % " ".join(
["%d:%d" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)]))
logger.info("token_is_max_context: %s" % " ".join([
"%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context)
]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info(
"input_mask: %s" % " ".join([str(x) for x in input_mask]))
if is_training:
answer_text = " ".join(tokens[start_position:(end_position + 1)])
logger.info("start_position: %d" % (start_position))
logger.info("end_position: %d" % (end_position))
logger.info(
"answer: %s" % (tokenization.printable_text(answer_text)))
features.append(
ContextFeatures(
unique_id=unique_id,
example_index=example_index,
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
start_position=start_position,
end_position=end_position))
question_features.append(
QuestionFeatures(
unique_id=unique_id,
example_index=example_index,
input_ids=input_ids_,
input_mask=input_mask_,
tokens=tokens_))
unique_id += 1
return features, question_features
# This is for embedding questions
def convert_questions_to_features(examples, tokenizer, max_query_length=None):
"""Loads a data file into a list of `InputBatch`s."""
unique_id = 1000000000
question_features = []
for (example_index, example) in enumerate(tqdm(examples, desc='converting')):
query_tokens = tokenizer.tokenize(example.question_text)
if max_query_length is None:
max_query_length = len(query_tokens)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
for _ in enumerate(range(1)):
tokens_ = []
tokens_.append("[CLS]")
for token in query_tokens:
tokens_.append(token)
tokens_.append("[SEP]")
input_ids_ = tokenizer.convert_tokens_to_ids(tokens_)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask_ = [1] * len(input_ids_)
# Zero-pad up to the sequence length.
while len(input_ids_) < max_query_length + 2:
input_ids_.append(0)
input_mask_.append(0)
assert len(input_ids_) == max_query_length + 2
assert len(input_mask_) == max_query_length + 2
if example_index < 20:
logger.info("*** Example ***")
logger.info("unique_id: %s" % (unique_id))
logger.info("example_index: %s" % (example_index))
logger.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in query_tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids_]))
logger.info(
"input_mask: %s" % " ".join([str(x) for x in input_mask_]))
question_features.append(
QuestionFeatures(
unique_id=unique_id,
example_index=example_index,
input_ids=input_ids_,
input_mask=input_mask_,
tokens=tokens_))
unique_id += 1
return question_features
def convert_documents_to_features(examples, tokenizer, max_seq_length, doc_stride):
"""Loads a data file into a list of `InputBatch`s."""
unique_id = 1000000000
features = []
for (example_index, example) in enumerate(tqdm(examples, desc='converting')):
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
# The -2 accounts for [CLS] and [SEP]
max_tokens_for_doc = max_seq_length - 2
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
tokens.append("[CLS]")
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
tokens.append("[SEP]")
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
if example_index < 20:
logger.info("*** Example ***")
logger.info("unique_id: %s" % (unique_id))
logger.info("example_index: %s" % (example_index))
logger.info("doc_span_index: %s" % (doc_span_index))
logger.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
logger.info("token_to_orig_map: %s" % " ".join(
["%d:%d" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)]))
logger.info("token_is_max_context: %s" % " ".join([
"%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context)
]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info(
"input_mask: %s" % " ".join([str(x) for x in input_mask]))
features.append(
ContextFeatures(
unique_id=unique_id,
example_index=example_index,
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask))
unique_id += 1
return features
def _context_to_tokens_and_offset(context):
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in context:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
return doc_tokens, char_to_word_offset
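# --- Illustrative check (not part of the original file) ---
# Whitespace tokenization plus a per-character map back to word indices; every
# character (including separating spaces) maps to the most recent word.
def _example_context_offsets():
    doc_tokens, char_to_word_offset = _context_to_tokens_and_offset("hi there")
    assert doc_tokens == ["hi", "there"]
    assert char_to_word_offset == [0, 0, 0, 1, 1, 1, 1, 1]
    return doc_tokens, char_to_word_offset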
def context_to_tokens_and_offset(context, tokenizer=None):
if tokenizer is None:
return _context_to_tokens_and_offset(context)
# Tokenizer must be content-preserving (e.g. PTB changes " to '', which is not acceptable)
doc_tokens = tokenizer(context)
char_to_word_offset = []
cur_pos = 0
for word_pos, token in enumerate(doc_tokens):
new_pos = context.find(token, cur_pos)
# set previous word's offset
assert new_pos >= 0, "cannot find `%s` in `%s`" % (token, context)
char_to_word_offset.extend([max(0, word_pos - 1)] * (new_pos - cur_pos))
assert context[len(char_to_word_offset)] == token[0]
char_to_word_offset.extend([word_pos] * len(token))
cur_pos = new_pos + len(token)
return doc_tokens, char_to_word_offset
def inject_noise(input_ids, input_mask,
clamp=False, clamp_prob=0.5, min_len=0, max_len=300, pad=0,
replace=False, replace_prob=0.3, unk_prob=0.1, vocab_size=30522, unk=100, min_id=999,
shuffle=False, shuffle_prob=0.2):
input_ids = input_ids[:]
input_mask = input_mask[:]
if clamp and random.random() < clamp_prob:
len_ = sum(input_mask) - 2
new_len = random.choice(range(min_len, max_len + 1))
if new_len < len_:
input_ids[new_len + 1] = input_ids[len_ + 1]
for i in range(new_len + 2, len(input_ids)):
input_ids[i] = pad
input_mask[i] = 0
len_ = sum(input_mask) - 2
if replace:
for i in range(1, len_ + 1):
if random.random() < replace_prob:
if random.random() < unk_prob:
new_id = unk
else:
new_id = random.choice(range(min_id, vocab_size))
input_ids[i] = new_id
if shuffle:
for i in range(1, len_ + 1):
if random.random() < shuffle_prob:
new_id = random.choice(input_ids[1:len_ + 1])
input_ids[i] = new_id
return input_ids, input_mask
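# --- Hypothetical usage sketch (not part of the original file) ---
# Clamping noise truncates the sequence to a random length in [min_len, max_len],
# moves the trailing [SEP] id forward and zero-pads the rest; the token ids below
# are arbitrary placeholders.
def _example_inject_noise():
    input_ids = [101, 2023, 2003, 7279, 102, 0, 0]   # [CLS], 3 tokens, [SEP], padding
    input_mask = [1, 1, 1, 1, 1, 0, 0]
    noisy_ids, noisy_mask = inject_noise(
        input_ids, input_mask, clamp=True, clamp_prob=1.0, min_len=1, max_len=2
    )
    assert sum(noisy_mask) <= sum(input_mask)
    return noisy_ids, noisy_mask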
def inject_noise_to_neg_features(features,
clamp=False, clamp_prob=1.0, min_len=0, max_len=300, pad=0,
replace=False, replace_prob=1.0, unk_prob=1.0, vocab_size=30522, unk=100, min_id=999,
shuffle=False, shuffle_prob=1.0):
features = copy.deepcopy(features)
input_ids = features.input_ids
input_mask = features.input_mask
if clamp and random.random() < clamp_prob:
len_ = sum(input_mask) - 2
new_len = random.choice(range(min_len, min(len_, max_len) + 1))
input_ids[new_len + 1] = input_ids[len_ + 1]
for i in range(new_len + 2, len(input_ids)):
input_ids[i] = pad
input_mask[i] = 0
len_ = sum(input_mask) - 2
if replace:
for i in range(1, len_ + 1):
if random.random() < replace_prob:
if random.random() < unk_prob:
new_id = unk
else:
new_id = random.choice(range(min_id, vocab_size))
input_ids[i] = new_id
if shuffle:
for i in range(1, len_ + 1):
if random.random() < shuffle_prob:
new_id = random.choice(input_ids[1:len_ + 1])
input_ids[i] = new_id
return features
def inject_noise_to_neg_features_list(features_list, noise_prob=1.0, **kwargs):
out = [inject_noise_to_neg_features(features, **kwargs) if random.random() < noise_prob
else features for features in features_list]
return out
def sample_similar_questions(examples, features, question_emb_file, cuda=False):
with h5py.File(question_emb_file, 'r') as fp:
ids = []
mats = []
for id_, mat in fp.items():
ids.append(id_)
mats.append(mat[:])
id2idx = {id_: idx for idx, id_ in enumerate(ids)}
large_mat = np.concatenate(mats, axis=0)
large_mat = torch.tensor(large_mat).float()
if cuda:
large_mat = large_mat.to(torch.device('cuda'))
"""
sim = large_mat.matmul(large_mat.t())
sim_argsort = (-sim).argsort(dim=1).cpu().numpy()
"""
id2features = collections.defaultdict(list)
for feature in features:
id_ = examples[feature.example_index].qas_id
id2features[id_].append(feature)
sampled_features = []
for feature in tqdm(features, desc='sampling'):
example = examples[feature.example_index]
example_tup = (example.title, example.doc_idx, example.pid)
id_ = example.qas_id
idx = id2idx[id_]
similar_feature = None
sim = (large_mat.matmul(large_mat[idx:idx+1, :].t()).squeeze(1))
sim_argsort = (-sim).argsort(dim=0).cpu().numpy()
for target_idx in sim_argsort:
target_features = id2features[ids[target_idx]]
for target_feature in target_features:
target_example = examples[target_feature.example_index]
target_tup = (target_example.title, target_example.doc_idx, target_example.pid)
if example_tup != target_tup:
similar_feature = target_feature
break
if similar_feature is not None:
break
assert similar_feature is not None
sampled_features.append(similar_feature)
return sampled_features
| [
"torch.device",
"torch.tensor"
] | 1.0 | soheeyang/denspi | f540b6a547f012823fc6c2bb10077df6bccc13a6 |
0.3 | import torch
from torch import nn, optim
from torch.nn import functional as F
EPS = 1e-12
class VAE(nn.Module):
def __init__(self, img_size, latent_spec, temperature=.67, use_cuda=False):
"""
Class which defines model and forward pass.
Parameters
----------
img_size : tuple of ints
Size of images. E.g. (1, 32, 32) or (3, 64, 64).
latent_spec : dict
Specifies latent distribution. For example:
{'cont': 10, 'disc': [10, 4, 3]} encodes 10 normal variables and
3 gumbel softmax variables of dimension 10, 4 and 3. A latent spec
can include both 'cont' and 'disc' or only 'cont' or only 'disc'.
temperature : float
Temperature for gumbel softmax distribution.
use_cuda : bool
If True moves model to GPU
"""
super(VAE, self).__init__()
self.use_cuda = use_cuda
# Parameters
self.img_size = img_size
self.is_continuous = 'cont' in latent_spec
self.is_discrete = 'disc' in latent_spec
self.latent_spec = latent_spec
self.num_pixels = img_size[1] * img_size[2]
self.temperature = temperature
self.hidden_dim = 256 # Hidden dimension of linear layer
self.reshape = (64, 4, 4) # Shape required to start transpose convs
# Calculate dimensions of latent distribution
self.latent_cont_dim = 0
self.latent_disc_dim = 0
self.num_disc_latents = 0
if self.is_continuous:
self.latent_cont_dim = self.latent_spec['cont']
if self.is_discrete:
self.latent_disc_dim += sum([dim for dim in self.latent_spec['disc']])
self.num_disc_latents = len(self.latent_spec['disc'])
self.latent_dim = self.latent_cont_dim + self.latent_disc_dim
# Define encoder layers
# Initial layer
encoder_layers = [
nn.Conv2d(self.img_size[0], 32, (4, 4), stride=2, padding=1),
nn.ReLU()
]
# Add additional layer if (64, 64) images
if self.img_size[1:] == (64, 64):
encoder_layers += [
nn.Conv2d(32, 32, (4, 4), stride=2, padding=1),
nn.ReLU()
]
elif self.img_size[1:] == (32, 32):
# (32, 32) images are supported but do not require an extra layer
pass
else:
raise RuntimeError("{} sized images not supported. Only (None, 32, 32) and (None, 64, 64) supported. Build your own architecture or reshape images!".format(img_size))
# Add final layers
encoder_layers += [
nn.Conv2d(32, 64, (4, 4), stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(64, 64, (4, 4), stride=2, padding=1),
nn.ReLU()
]
# Define encoder
self.img_to_features = nn.Sequential(*encoder_layers)
# Map encoded features into a hidden vector which will be used to
# encode parameters of the latent distribution
self.features_to_hidden = nn.Sequential(
nn.Linear(64 * 4 * 4, self.hidden_dim),
nn.ReLU()
)
# Encode parameters of latent distribution
if self.is_continuous:
self.fc_mean = nn.Linear(self.hidden_dim, self.latent_cont_dim)
self.fc_log_var = nn.Linear(self.hidden_dim, self.latent_cont_dim)
if self.is_discrete:
# Linear layer for each of the categorical distributions
fc_alphas = []
for disc_dim in self.latent_spec['disc']:
fc_alphas.append(nn.Linear(self.hidden_dim, disc_dim))
self.fc_alphas = nn.ModuleList(fc_alphas)
# Map latent samples to features to be used by generative model
self.latent_to_features = nn.Sequential(
nn.Linear(self.latent_dim, self.hidden_dim),
nn.ReLU(),
nn.Linear(self.hidden_dim, 64 * 4 * 4),
nn.ReLU()
)
# Define decoder
decoder_layers = []
# Additional decoding layer for (64, 64) images
if self.img_size[1:] == (64, 64):
decoder_layers += [
nn.ConvTranspose2d(64, 64, (4, 4), stride=2, padding=1),
nn.ReLU()
]
decoder_layers += [
nn.ConvTranspose2d(64, 32, (4, 4), stride=2, padding=1),
nn.ReLU(),
nn.ConvTranspose2d(32, 32, (4, 4), stride=2, padding=1),
nn.ReLU(),
nn.ConvTranspose2d(32, self.img_size[0], (4, 4), stride=2, padding=1),
nn.Sigmoid()
]
# Define decoder
self.features_to_img = nn.Sequential(*decoder_layers)
def encode(self, x):
"""
Encodes an image into parameters of a latent distribution defined in
self.latent_spec.
Parameters
----------
x : torch.Tensor
Batch of data, shape (N, C, H, W)
"""
batch_size = x.size()[0]
# Encode image to hidden features
features = self.img_to_features(x)
hidden = self.features_to_hidden(features.view(batch_size, -1))
# Output parameters of latent distribution from hidden representation
latent_dist = {}
if self.is_continuous:
latent_dist['cont'] = [self.fc_mean(hidden), self.fc_log_var(hidden)]
if self.is_discrete:
latent_dist['disc'] = []
for fc_alpha in self.fc_alphas:
latent_dist['disc'].append(F.softmax(fc_alpha(hidden), dim=1))
return latent_dist
def reparameterize(self, latent_dist):
"""
Samples from latent distribution using the reparameterization trick.
Parameters
----------
latent_dist : dict
Dict with keys 'cont' or 'disc' or both, containing the parameters
of the latent distributions as torch.Tensor instances.
"""
latent_sample = []
if self.is_continuous:
mean, logvar = latent_dist['cont']
cont_sample = self.sample_normal(mean, logvar)
latent_sample.append(cont_sample)
if self.is_discrete:
for alpha in latent_dist['disc']:
disc_sample = self.sample_gumbel_softmax(alpha)
latent_sample.append(disc_sample)
# Concatenate continuous and discrete samples into one large sample
return torch.cat(latent_sample, dim=1)
def sample_normal(self, mean, logvar):
"""
Samples from a normal distribution using the reparameterization trick.
Parameters
----------
mean : torch.Tensor
Mean of the normal distribution. Shape (N, D) where D is dimension
of distribution.
logvar : torch.Tensor
Diagonal log variance of the normal distribution. Shape (N, D)
"""
if self.training:
std = torch.exp(0.5 * logvar)
eps = torch.zeros(std.size()).normal_()
if self.use_cuda:
eps = eps.cuda()
return mean + std * eps
else:
# Reconstruction mode
return mean
def sample_gumbel_softmax(self, alpha):
"""
Samples from a gumbel-softmax distribution using the reparameterization
trick.
Parameters
----------
alpha : torch.Tensor
Parameters of the gumbel-softmax distribution. Shape (N, D)
"""
if self.training:
# Sample from gumbel distribution
unif = torch.rand(alpha.size())
if self.use_cuda:
unif = unif.cuda()
gumbel = -torch.log(-torch.log(unif + EPS) + EPS)
# Reparameterize to create gumbel softmax sample
log_alpha = torch.log(alpha + EPS)
logit = (log_alpha + gumbel) / self.temperature
return F.softmax(logit, dim=1)
else:
# In reconstruction mode, pick most likely sample
_, max_alpha = torch.max(alpha, dim=1)
one_hot_samples = torch.zeros(alpha.size())
# On axis 1 of one_hot_samples, scatter the value 1 at indices
# max_alpha. Note the view is because scatter_ only accepts 2D
# tensors.
one_hot_samples.scatter_(1, max_alpha.view(-1, 1).data.cpu(), 1)
if self.use_cuda:
one_hot_samples = one_hot_samples.cuda()
return one_hot_samples
def decode(self, latent_sample):
"""
Decodes sample from latent distribution into an image.
Parameters
----------
latent_sample : torch.Tensor
Sample from latent distribution. Shape (N, L) where L is dimension
of latent distribution.
"""
features = self.latent_to_features(latent_sample)
return self.features_to_img(features.view(-1, *self.reshape))
def forward(self, x):
"""
Forward pass of model.
Parameters
----------
x : torch.Tensor
Batch of data. Shape (N, C, H, W)
"""
latent_dist = self.encode(x)
latent_sample = self.reparameterize(latent_dist)
return self.decode(latent_sample), latent_dist
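# --- Hypothetical usage sketch (not part of the original file) ---
# Builds a joint continuous/discrete VAE as described in the constructor
# docstring and runs a forward pass on a random batch; checks shapes only.
def _example_vae_forward():
    model = VAE(img_size=(1, 32, 32), latent_spec={'cont': 10, 'disc': [10]})
    x = torch.rand(4, 1, 32, 32)
    recon, latent_dist = model(x)
    assert recon.shape == x.shape
    assert latent_dist['cont'][0].shape == (4, 10)  # means
    assert latent_dist['disc'][0].shape == (4, 10)  # softmax alphas
    return recon, latent_dist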
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.Sigmoid",
"torch.nn.Sequential",
"torch.max",
"torch.nn.ConvTranspose2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.functional.softmax",
"torch.log",
"torch.exp"
] | 0.3.1 | 7wik/joint-vae | 4ba1ba7c202cb7c2dd8f1467e1f1b82ef5efd344 |
1.7 | # Loss functions
import torch
import torch.nn as nn
from utils.metrics import bbox_iou
from utils.torch_utils import is_parallel
def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
# return positive, negative label smoothing BCE targets
return 1.0 - 0.5 * eps, 0.5 * eps
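# --- Illustrative check (not part of the original file) ---
# With the default eps=0.1, positives are trained toward 0.95 and negatives
# toward 0.05 instead of hard 1/0 targets.
def _example_smooth_bce():
    cp, cn = smooth_BCE(eps=0.1)
    assert abs(cp - 0.95) < 1e-9 and abs(cn - 0.05) < 1e-9
    return cp, cn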
class BCEBlurWithLogitsLoss(nn.Module):
# BCEWithLogitsLoss() with reduced missing label effects.
def __init__(self, alpha=0.05):
super(BCEBlurWithLogitsLoss, self).__init__()
self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss()
self.alpha = alpha
def forward(self, pred, true):
loss = self.loss_fcn(pred, true)
pred = torch.sigmoid(pred) # prob from logits
dx = pred - true # reduce only missing label effects
# dx = (pred - true).abs() # reduce missing label and false label effects
alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4))
loss *= alpha_factor
return loss.mean()
class FocalLoss(nn.Module):
# Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
super(FocalLoss, self).__init__()
self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
self.gamma = gamma
self.alpha = alpha
self.reduction = loss_fcn.reduction
self.loss_fcn.reduction = 'none' # required to apply FL to each element
def forward(self, pred, true):
loss = self.loss_fcn(pred, true)
# p_t = torch.exp(-loss)
# loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability
# TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
pred_prob = torch.sigmoid(pred) # prob from logits
p_t = true * pred_prob + (1 - true) * (1 - pred_prob)
alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
modulating_factor = (1.0 - p_t) ** self.gamma
loss *= alpha_factor * modulating_factor
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
else: # 'none'
return loss
class QFocalLoss(nn.Module):
# Wraps Quality focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
super(QFocalLoss, self).__init__()
self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
self.gamma = gamma
self.alpha = alpha
self.reduction = loss_fcn.reduction
self.loss_fcn.reduction = 'none' # required to apply FL to each element
def forward(self, pred, true):
loss = self.loss_fcn(pred, true)
pred_prob = torch.sigmoid(pred) # prob from logits
alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
modulating_factor = torch.abs(true - pred_prob) ** self.gamma
loss *= alpha_factor * modulating_factor
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
else: # 'none'
return loss
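# --- Hypothetical usage sketch (not part of the original file) ---
# FocalLoss/QFocalLoss wrap an existing BCEWithLogitsLoss as noted in the class
# comments above; the wrapper switches the inner criterion to 'none' reduction
# and re-applies the original reduction after per-element weighting.
def _example_focal_loss():
    criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5, alpha=0.25)
    pred = torch.randn(8, 80)                    # raw logits
    true = torch.randint(0, 2, (8, 80)).float()  # binary targets
    return criteria(pred, true)                  # scalar (mean-reduced) loss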
class ComputeLoss:
# Compute losses
def __init__(self, model, autobalance=False):
super(ComputeLoss, self).__init__()
self.sort_obj_iou = False
device = next(model.parameters()).device # get model device
h = model.hyp # hyperparameters
# Define criteria
BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))
BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))
# Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets
# Focal loss
g = h['fl_gamma'] # focal loss gamma
if g > 0:
BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)
det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module
self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02]) # P3-P7
self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index
self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance
for k in 'na', 'nc', 'nl', 'anchors':
setattr(self, k, getattr(det, k))
def __call__(self, p, targets): # predictions, targets, model
device = targets.device
lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
tcls, tbox, indices, anchors = self.build_targets(p, targets) # targets
# Losses
for i, pi in enumerate(p): # layer index, layer predictions
b, a, gj, gi = indices[i] # image, anchor, gridy, gridx
tobj = torch.zeros_like(pi[..., 0], device=device) # target obj
n = b.shape[0] # number of targets
if n:
ps = pi[b, a, gj, gi] # prediction subset corresponding to targets
# Regression
pxy = ps[:, :2].sigmoid() * 2. - 0.5
pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
pbox = torch.cat((pxy, pwh), 1) # predicted box
iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True) # iou(prediction, target)
lbox += (1.0 - iou).mean() # iou loss
# Objectness
score_iou = iou.detach().clamp(0).type(tobj.dtype)
if self.sort_obj_iou:
sort_id = torch.argsort(score_iou)
b, a, gj, gi, score_iou = b[sort_id], a[sort_id], gj[sort_id], gi[sort_id], score_iou[sort_id]
tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * score_iou # iou ratio
# Classification
if self.nc > 1: # cls loss (only if multiple classes)
t = torch.full_like(ps[:, 5:], self.cn, device=device) # targets
t[range(n), tcls[i]] = self.cp
lcls += self.BCEcls(ps[:, 5:], t) # BCE
# Append targets to text file
# with open('targets.txt', 'a') as file:
# [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]
obji = self.BCEobj(pi[..., 4], tobj)
lobj += obji * self.balance[i] # obj loss
if self.autobalance:
self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()
if self.autobalance:
self.balance = [x / self.balance[self.ssi] for x in self.balance]
lbox *= self.hyp['box']
lobj *= self.hyp['obj']
lcls *= self.hyp['cls']
bs = tobj.shape[0] # batch size
return (lbox + lobj + lcls) * bs, torch.cat((lbox, lobj, lcls)).detach()
def build_targets(self, p, targets):
# Build targets for compute_loss(), input targets(image,class,x,y,w,h)
na, nt = self.na, targets.shape[0] # number of anchors, targets
tcls, tbox, indices, anch = [], [], [], []
gain = torch.ones(7, device=targets.device) # normalized to gridspace gain
ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt)
targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices
g = 0.5 # bias
off = torch.tensor([[0, 0],
[1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m
# [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm
], device=targets.device).float() * g # offsets
for i in range(self.nl):
anchors = self.anchors[i]
gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain
# Match targets to anchors
t = targets * gain
if nt:
# Matches
r = t[:, :, 4:6] / anchors[:, None] # wh ratio
j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t'] # compare
# j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
t = t[j] # filter
# Offsets
gxy = t[:, 2:4] # grid xy
gxi = gain[[2, 3]] - gxy # inverse
j, k = ((gxy % 1. < g) & (gxy > 1.)).T
l, m = ((gxi % 1. < g) & (gxi > 1.)).T
j = torch.stack((torch.ones_like(j), j, k, l, m))
t = t.repeat((5, 1, 1))[j]
offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
else:
t = targets[0]
offsets = 0
# Define
b, c = t[:, :2].long().T # image, class
gxy = t[:, 2:4] # grid xy
gwh = t[:, 4:6] # grid wh
gij = (gxy - offsets).long()
gi, gj = gij.T # grid xy indices
# Append
a = t[:, 6].long() # anchor indices
indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices
tbox.append(torch.cat((gxy - gij, gwh), 1)) # box
anch.append(anchors[a]) # anchors
tcls.append(c) # class
return tcls, tbox, indices, anch
| [
"torch.zeros",
"torch.sigmoid",
"torch.cat",
"torch.arange",
"torch.max",
"torch.argsort",
"torch.ones",
"torch.abs",
"torch.full_like",
"torch.tensor",
"torch.nn.BCEWithLogitsLoss",
"torch.ones_like",
"torch.zeros_like",
"torch.exp"
] | 1.7.0 | Daesung-Jung/baseball_pitchdesign | d4dc1eb5fb42bbf0bb3564be8def0492b80a0074 |
1.0 | #!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning XLNet for question answering with beam search using 🤗 Accelerate.
"""
# You can also adapt this script on your own question answering task. Pointers for this are left as comments.
import argparse
import logging
import math
import os
import random
from pathlib import Path
import datasets
import numpy as np
import torch
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
import transformers
from accelerate import Accelerator
from huggingface_hub import Repository
from transformers import (
AdamW,
DataCollatorWithPadding,
EvalPrediction,
SchedulerType,
XLNetConfig,
XLNetForQuestionAnswering,
XLNetTokenizerFast,
default_data_collator,
get_scheduler,
set_seed,
)
from transformers.file_utils import get_full_repo_name
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
from utils_qa import postprocess_qa_predictions_with_beam_search
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.14.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt")
logger = logging.getLogger(__name__)
def parse_args():
parser = argparse.ArgumentParser(description="Finetune a transformers model on a Question Answering task")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--train_file", type=str, default=None, help="A csv or a json file containing the training data."
)
parser.add_argument(
"--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data."
)
parser.add_argument("--do_predict", action="store_true", help="Eval the question answering model")
parser.add_argument(
"--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
)
parser.add_argument(
"--test_file", type=str, default=None, help="A csv or a json file containing the Prediction data."
)
parser.add_argument(
"--max_seq_length",
type=int,
default=384,
help="The maximum total input sequence length after tokenization. Sequences longer than this will be truncated,"
" sequences shorter will be padded if `--pad_to_max_lengh` is passed.",
)
parser.add_argument(
"--pad_to_max_length",
action="store_true",
help="If passed, pad all samples to `max_seq_length`. Otherwise, dynamic padding is used.",
)
parser.add_argument(
"--model_name_or_path",
type=str,
help="Path to pretrained model or model identifier from huggingface.co/models.",
required=True,
)
parser.add_argument(
"--per_device_train_batch_size",
type=int,
default=8,
help="Batch size (per device) for the training dataloader.",
)
parser.add_argument(
"--per_device_eval_batch_size",
type=int,
default=8,
help="Batch size (per device) for the evaluation dataloader.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=5e-5,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.")
parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.")
parser.add_argument(
"--max_train_steps",
type=int,
default=None,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--lr_scheduler_type",
type=SchedulerType,
default="linear",
help="The scheduler type to use.",
choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
)
parser.add_argument(
"--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler."
)
parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.")
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
parser.add_argument(
"--doc_stride",
type=int,
default=128,
help="When splitting up a long document into chunks how much stride to take between chunks.",
)
parser.add_argument(
"--n_best_size",
type=int,
default=20,
help="The total number of n-best predictions to generate when looking for an answer.",
)
parser.add_argument(
"--null_score_diff_threshold",
type=float,
default=0.0,
help="The threshold used to select the null answer: if the best answer has a score that is less than "
"the score of the null answer minus this threshold, the null answer is selected for this example. "
"Only useful when `version_2_with_negative=True`.",
)
parser.add_argument(
"--version_2_with_negative",
type=bool,
default=False,
help="If true, some of the examples do not have an answer.",
)
parser.add_argument(
"--max_answer_length",
type=int,
default=30,
help="The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another.",
)
parser.add_argument(
"--max_train_samples",
type=int,
default=None,
help="For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set.",
)
parser.add_argument(
"--max_eval_samples",
type=int,
default=None,
help="For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set.",
)
parser.add_argument(
"--overwrite_cache", type=bool, default=False, help="Overwrite the cached training and evaluation sets"
)
parser.add_argument(
"--max_predict_samples",
type=int,
default=None,
help="For debugging purposes or quicker training, truncate the number of prediction examples to this",
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
parser.add_argument(
"--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`."
)
parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.")
args = parser.parse_args()
# Sanity checks
if (
args.dataset_name is None
and args.train_file is None
and args.validation_file is None
and args.test_file is None
):
raise ValueError("Need either a dataset name or a training/validation/test file.")
else:
if args.train_file is not None:
extension = args.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if args.validation_file is not None:
extension = args.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
if args.test_file is not None:
extension = args.test_file.split(".")[-1]
assert extension in ["csv", "json"], "`test_file` should be a csv or a json file."
if args.push_to_hub:
assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed."
return args
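# Example invocation (added for illustration; the script filename, dataset, and
# output path below are placeholders, not taken from the original repository):
#   accelerate launch run_qa_beam_search_no_trainer.py \
#     --model_name_or_path xlnet-base-cased \
#     --dataset_name squad \
#     --max_seq_length 384 \
#     --doc_stride 128 \
#     --output_dir /tmp/xlnet_squad_output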
def main():
args = parse_args()
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Handle the repository creation
if accelerator.is_main_process:
if args.push_to_hub:
if args.hub_model_id is None:
repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
else:
repo_name = args.hub_model_id
repo = Repository(args.output_dir, clone_from=repo_name)
elif args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
accelerator.wait_for_everyone()
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
data_files = {}
if args.train_file is not None:
data_files["train"] = args.train_file
if args.validation_file is not None:
data_files["validation"] = args.validation_file
if args.test_file is not None:
data_files["test"] = args.test_file
extension = args.train_file.split(".")[-1]
raw_datasets = load_dataset(extension, data_files=data_files, field="data")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = XLNetConfig.from_pretrained(args.model_name_or_path)
tokenizer = XLNetTokenizerFast.from_pretrained(args.model_name_or_path)
model = XLNetForQuestionAnswering.from_pretrained(
args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config
)
# Preprocessing the datasets.
    # Preprocessing is slightly different for training and evaluation.
column_names = raw_datasets["train"].column_names
question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
# Training preprocessing
def prepare_train_features(examples):
# Some of the questions have lots of whitespace on the left, which is not useful and will make the
        # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
# left whitespace
examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
        # in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
tokenized_examples = tokenizer(
examples[question_column_name if pad_on_right else context_column_name],
examples[context_column_name if pad_on_right else question_column_name],
truncation="only_second" if pad_on_right else "only_first",
max_length=max_seq_length,
stride=args.doc_stride,
return_overflowing_tokens=True,
return_offsets_mapping=True,
return_special_tokens_mask=True,
return_token_type_ids=True,
padding="max_length",
)
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
# The offset mappings will give us a map from token to character position in the original context. This will
# help us compute the start_positions and end_positions.
offset_mapping = tokenized_examples.pop("offset_mapping")
# The special tokens will help us build the p_mask (which indicates the tokens that can't be in answers).
special_tokens = tokenized_examples.pop("special_tokens_mask")
# Let's label those examples!
tokenized_examples["start_positions"] = []
tokenized_examples["end_positions"] = []
tokenized_examples["is_impossible"] = []
tokenized_examples["cls_index"] = []
tokenized_examples["p_mask"] = []
for i, offsets in enumerate(offset_mapping):
# We will label impossible answers with the index of the CLS token.
input_ids = tokenized_examples["input_ids"][i]
cls_index = input_ids.index(tokenizer.cls_token_id)
tokenized_examples["cls_index"].append(cls_index)
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
sequence_ids = tokenized_examples["token_type_ids"][i]
for k, s in enumerate(special_tokens[i]):
if s:
sequence_ids[k] = 3
context_idx = 1 if pad_on_right else 0
# Build the p_mask: non special tokens and context gets 0.0, the others get 1.0.
# The cls token gets 1.0 too (for predictions of empty answers).
tokenized_examples["p_mask"].append(
[
0.0 if (not special_tokens[i][k] and s == context_idx) or k == cls_index else 1.0
for k, s in enumerate(sequence_ids)
]
)
# One example can give several spans, this is the index of the example containing this span of text.
sample_index = sample_mapping[i]
answers = examples[answer_column_name][sample_index]
# If no answers are given, set the cls_index as answer.
if len(answers["answer_start"]) == 0:
tokenized_examples["start_positions"].append(cls_index)
tokenized_examples["end_positions"].append(cls_index)
tokenized_examples["is_impossible"].append(1.0)
else:
# Start/end character index of the answer in the text.
start_char = answers["answer_start"][0]
end_char = start_char + len(answers["text"][0])
# Start token index of the current span in the text.
token_start_index = 0
while sequence_ids[token_start_index] != context_idx:
token_start_index += 1
# End token index of the current span in the text.
token_end_index = len(input_ids) - 1
while sequence_ids[token_end_index] != context_idx:
token_end_index -= 1
# Detect if the answer is out of the span (in which case this feature is labeled with the CLS index).
if not (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char):
tokenized_examples["start_positions"].append(cls_index)
tokenized_examples["end_positions"].append(cls_index)
tokenized_examples["is_impossible"].append(1.0)
else:
# Otherwise move the token_start_index and token_end_index to the two ends of the answer.
# Note: we could go after the last offset if the answer is the last word (edge case).
while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char:
token_start_index += 1
tokenized_examples["start_positions"].append(token_start_index - 1)
while offsets[token_end_index][1] >= end_char:
token_end_index -= 1
tokenized_examples["end_positions"].append(token_end_index + 1)
tokenized_examples["is_impossible"].append(0.0)
return tokenized_examples
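    # Illustrative example (added, not part of the original script): say the answer
    # "1863" starts at character 42 of the context (end_char=46), and context tokens
    # 17 and 18 have offsets (40, 44) and (44, 48). The two loops above then yield
    # start_positions=17 and end_positions=18, the smallest token span whose
    # character range covers the answer.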
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = raw_datasets["train"]
if args.max_train_samples is not None:
        # We will select a sample from the whole data if the argument is specified
train_dataset = train_dataset.select(range(args.max_train_samples))
# Create train feature from dataset
with accelerator.main_process_first():
train_dataset = train_dataset.map(
prepare_train_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="Running tokenizer on train dataset",
)
if args.max_train_samples is not None:
            # The number of samples might increase during feature creation, so we select only the specified max samples
train_dataset = train_dataset.select(range(args.max_train_samples))
# Validation preprocessing
def prepare_validation_features(examples):
# Some of the questions have lots of whitespace on the left, which is not useful and will make the
        # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
# left whitespace
examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
        # in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
tokenized_examples = tokenizer(
examples[question_column_name if pad_on_right else context_column_name],
examples[context_column_name if pad_on_right else question_column_name],
truncation="only_second" if pad_on_right else "only_first",
max_length=max_seq_length,
stride=args.doc_stride,
return_overflowing_tokens=True,
return_offsets_mapping=True,
return_special_tokens_mask=True,
return_token_type_ids=True,
padding="max_length",
)
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
# The special tokens will help us build the p_mask (which indicates the tokens that can't be in answers).
special_tokens = tokenized_examples.pop("special_tokens_mask")
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
tokenized_examples["example_id"] = []
# We still provide the index of the CLS token and the p_mask to the model, but not the is_impossible label.
tokenized_examples["cls_index"] = []
tokenized_examples["p_mask"] = []
for i, input_ids in enumerate(tokenized_examples["input_ids"]):
# Find the CLS token in the input ids.
cls_index = input_ids.index(tokenizer.cls_token_id)
tokenized_examples["cls_index"].append(cls_index)
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
sequence_ids = tokenized_examples["token_type_ids"][i]
for k, s in enumerate(special_tokens[i]):
if s:
sequence_ids[k] = 3
context_idx = 1 if pad_on_right else 0
# Build the p_mask: non special tokens and context gets 0.0, the others 1.0.
tokenized_examples["p_mask"].append(
[
0.0 if (not special_tokens[i][k] and s == context_idx) or k == cls_index else 1.0
for k, s in enumerate(sequence_ids)
]
)
# One example can give several spans, this is the index of the example containing this span of text.
sample_index = sample_mapping[i]
tokenized_examples["example_id"].append(examples["id"][sample_index])
            # Set the offset_mapping entries that are not part of the context to None, so it is easy to determine
            # whether a token position is part of the context or not.
tokenized_examples["offset_mapping"][i] = [
(o if sequence_ids[k] == context_idx else None)
for k, o in enumerate(tokenized_examples["offset_mapping"][i])
]
return tokenized_examples
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_examples = raw_datasets["validation"]
if args.max_eval_samples is not None:
# We will select sample from whole data
eval_examples = eval_examples.select(range(args.max_eval_samples))
# Validation Feature Creation
with accelerator.main_process_first():
eval_dataset = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="Running tokenizer on validation dataset",
)
if args.max_eval_samples is not None:
            # During feature creation the number of samples might increase, so we select the required samples again
eval_dataset = eval_dataset.select(range(args.max_eval_samples))
if args.do_predict:
if "test" not in raw_datasets:
raise ValueError("--do_predict requires a test dataset")
predict_examples = raw_datasets["test"]
if args.max_predict_samples is not None:
# We will select sample from whole data
predict_examples = predict_examples.select(range(args.max_predict_samples))
# Predict Feature Creation
with accelerator.main_process_first():
predict_dataset = predict_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="Running tokenizer on prediction dataset",
)
if args.max_predict_samples is not None:
                # During feature creation the number of samples might increase, so we select the required samples again
predict_dataset = predict_dataset.select(range(args.max_predict_samples))
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# DataLoaders creation:
if args.pad_to_max_length:
        # If padding was already done to max length, we use the default data collator that will just convert everything
# to tensors.
data_collator = default_data_collator
else:
# Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by padding to the maximum length of
        # the samples passed). When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to a multiple
        # of 8, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.0 (Volta).
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=(8 if accelerator.use_fp16 else None))
train_dataloader = DataLoader(
train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size
)
eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
if args.do_predict:
predict_dataset_for_model = predict_dataset.remove_columns(["example_id", "offset_mapping"])
predict_dataloader = DataLoader(
predict_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
# Post-processing:
def post_processing_function(examples, features, predictions, stage="eval"):
# Post-processing: we match the start logits and end logits to answers in the original context.
predictions, scores_diff_json = postprocess_qa_predictions_with_beam_search(
examples=examples,
features=features,
predictions=predictions,
version_2_with_negative=args.version_2_with_negative,
n_best_size=args.n_best_size,
max_answer_length=args.max_answer_length,
start_n_top=model.config.start_n_top,
end_n_top=model.config.end_n_top,
output_dir=args.output_dir,
prefix=stage,
)
# Format the result to the format the metric expects.
if args.version_2_with_negative:
formatted_predictions = [
{"id": k, "prediction_text": v, "no_answer_probability": scores_diff_json[k]}
for k, v in predictions.items()
]
else:
formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
def create_and_fill_np_array(start_or_end_logits, dataset, max_len):
"""
Create and fill numpy array of size len_of_validation_data * max_length_of_output_tensor
Args:
start_or_end_logits(:obj:`tensor`):
This is the output predictions of the model. We can only enter either start or end logits.
eval_dataset: Evaluation dataset
max_len(:obj:`int`):
The maximum length of the output tensor. ( See the model.eval() part for more details )
"""
step = 0
# create a numpy array and fill it with -100.
logits_concat = np.full((len(dataset), max_len), -100, dtype=np.float32)
        # Now that we have created the array, we populate it with the outputs gathered using accelerator.gather
for i, output_logit in enumerate(start_or_end_logits): # populate columns
            # We copy each gathered tensor into the newly created array and advance
            # `step` by the batch size after every iteration
batch_size = output_logit.shape[0]
cols = output_logit.shape[1]
if step + batch_size < len(dataset):
logits_concat[step : step + batch_size, :cols] = output_logit
else:
logits_concat[step:, :cols] = output_logit[: len(dataset) - step]
step += batch_size
return logits_concat
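    # Illustrative example (added): with 16 eval features in total and two gathered
    # batches of start logits shaped (8, 5) and (8, 3) with max_len=5, rows 0-7 of
    # logits_concat hold the first batch, rows 8-15 hold the second batch in columns
    # 0-2, and columns 3-4 of those rows keep the -100 fill value used as padding.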
# Optimizer
# Split weights in two groups, one with weight decay and the other not.
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
# Prepare everything with our `accelerator`.
model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader
)
    # Note -> the training dataloader needs to be prepared before we grab its length below (because its length will be
    # shorter in a multi-process setup)
# Scheduler and math around the number of training steps.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
else:
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
lr_scheduler = get_scheduler(
name=args.lr_scheduler_type,
optimizer=optimizer,
num_warmup_steps=args.num_warmup_steps,
num_training_steps=args.max_train_steps,
)
# Train!
total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
# Only show the progress bar once on each machine.
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
completed_steps = 0
for epoch in range(args.num_train_epochs):
model.train()
for step, batch in enumerate(train_dataloader):
outputs = model(**batch)
loss = outputs.loss
loss = loss / args.gradient_accumulation_steps
accelerator.backward(loss)
if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
progress_bar.update(1)
completed_steps += 1
if completed_steps >= args.max_train_steps:
break
if args.push_to_hub and epoch < args.num_train_epochs - 1:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
if accelerator.is_main_process:
tokenizer.save_pretrained(args.output_dir)
repo.push_to_hub(
commit_message=f"Training in progress epoch {epoch}", blocking=False, auto_lfs_prune=True
)
    # initialize all lists to collect the batches
all_start_top_log_probs = []
all_start_top_index = []
all_end_top_log_probs = []
all_end_top_index = []
all_cls_logits = []
for step, batch in enumerate(eval_dataloader):
with torch.no_grad():
outputs = model(**batch)
start_top_log_probs = outputs.start_top_log_probs
start_top_index = outputs.start_top_index
end_top_log_probs = outputs.end_top_log_probs
end_top_index = outputs.end_top_index
cls_logits = outputs.cls_logits
if not args.pad_to_max_length: # necessary to pad predictions and labels for being gathered
start_top_log_probs = accelerator.pad_across_processes(start_top_log_probs, dim=1, pad_index=-100)
start_top_index = accelerator.pad_across_processes(start_top_index, dim=1, pad_index=-100)
end_top_log_probs = accelerator.pad_across_processes(end_top_log_probs, dim=1, pad_index=-100)
end_top_index = accelerator.pad_across_processes(end_top_index, dim=1, pad_index=-100)
cls_logits = accelerator.pad_across_processes(cls_logits, dim=1, pad_index=-100)
all_start_top_log_probs.append(accelerator.gather(start_top_log_probs).cpu().numpy())
all_start_top_index.append(accelerator.gather(start_top_index).cpu().numpy())
all_end_top_log_probs.append(accelerator.gather(end_top_log_probs).cpu().numpy())
all_end_top_index.append(accelerator.gather(end_top_index).cpu().numpy())
all_cls_logits.append(accelerator.gather(cls_logits).cpu().numpy())
max_len = max([x.shape[1] for x in all_end_top_log_probs]) # Get the max_length of the tensor
# concatenate all numpy arrays collected above
start_top_log_probs_concat = create_and_fill_np_array(all_start_top_log_probs, eval_dataset, max_len)
start_top_index_concat = create_and_fill_np_array(all_start_top_index, eval_dataset, max_len)
end_top_log_probs_concat = create_and_fill_np_array(all_end_top_log_probs, eval_dataset, max_len)
end_top_index_concat = create_and_fill_np_array(all_end_top_index, eval_dataset, max_len)
cls_logits_concat = np.concatenate(all_cls_logits, axis=0)
    # delete the per-batch tensors that are no longer needed
del start_top_log_probs
del start_top_index
del end_top_log_probs
del end_top_index
del cls_logits
outputs_numpy = (
start_top_log_probs_concat,
start_top_index_concat,
end_top_log_probs_concat,
end_top_index_concat,
cls_logits_concat,
)
prediction = post_processing_function(eval_examples, eval_dataset, outputs_numpy)
eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"Evaluation metrics: {eval_metric}")
if args.do_predict:
        # initialize all lists to collect the batches
all_start_top_log_probs = []
all_start_top_index = []
all_end_top_log_probs = []
all_end_top_index = []
all_cls_logits = []
for step, batch in enumerate(predict_dataloader):
with torch.no_grad():
outputs = model(**batch)
start_top_log_probs = outputs.start_top_log_probs
start_top_index = outputs.start_top_index
end_top_log_probs = outputs.end_top_log_probs
end_top_index = outputs.end_top_index
cls_logits = outputs.cls_logits
if not args.pad_to_max_length: # necessary to pad predictions and labels for being gathered
start_top_log_probs = accelerator.pad_across_processes(start_top_log_probs, dim=1, pad_index=-100)
start_top_index = accelerator.pad_across_processes(start_top_index, dim=1, pad_index=-100)
end_top_log_probs = accelerator.pad_across_processes(end_top_log_probs, dim=1, pad_index=-100)
end_top_index = accelerator.pad_across_processes(end_top_index, dim=1, pad_index=-100)
cls_logits = accelerator.pad_across_processes(cls_logits, dim=1, pad_index=-100)
all_start_top_log_probs.append(accelerator.gather(start_top_log_probs).cpu().numpy())
all_start_top_index.append(accelerator.gather(start_top_index).cpu().numpy())
all_end_top_log_probs.append(accelerator.gather(end_top_log_probs).cpu().numpy())
all_end_top_index.append(accelerator.gather(end_top_index).cpu().numpy())
all_cls_logits.append(accelerator.gather(cls_logits).cpu().numpy())
max_len = max([x.shape[1] for x in all_end_top_log_probs]) # Get the max_length of the tensor
# concatenate all numpy arrays collected above
start_top_log_probs_concat = create_and_fill_np_array(all_start_top_log_probs, predict_dataset, max_len)
start_top_index_concat = create_and_fill_np_array(all_start_top_index, predict_dataset, max_len)
end_top_log_probs_concat = create_and_fill_np_array(all_end_top_log_probs, predict_dataset, max_len)
end_top_index_concat = create_and_fill_np_array(all_end_top_index, predict_dataset, max_len)
cls_logits_concat = np.concatenate(all_cls_logits, axis=0)
        # delete the per-batch tensors that are no longer needed
del start_top_log_probs
del start_top_index
del end_top_log_probs
del end_top_index
del cls_logits
outputs_numpy = (
start_top_log_probs_concat,
start_top_index_concat,
end_top_log_probs_concat,
end_top_index_concat,
cls_logits_concat,
)
prediction = post_processing_function(predict_examples, predict_dataset, outputs_numpy)
predict_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"Predict metrics: {predict_metric}")
if args.output_dir is not None:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
if accelerator.is_main_process:
tokenizer.save_pretrained(args.output_dir)
if args.push_to_hub:
repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True)
if __name__ == "__main__":
main()
| [
"torch.no_grad",
"torch.utils.data.DataLoader"
] | 1.0 | jecoz/transformers | cbfda413389830fc2788f82f49279c4c566b4194 |
1.5 | import torch
def repeat_parameter(parameter, *sizes):
parameter.data = parameter.data.repeat(*sizes)
if parameter.grad is not None:
parameter.grad = parameter.grad.repeat(*sizes)
def qkv_weight_repeat(parameter, ratio):
q, k, v = torch.chunk(parameter, 3, dim=0)
q = q.repeat(ratio, ratio)
k = k.repeat(ratio, ratio)
v = v.repeat(ratio, ratio)
return torch.cat([q, k, v], dim=0)
def expand_qkv(qkv_net, ratio):
qkv_net.weight.data = qkv_weight_repeat(qkv_net.weight.data, ratio)
if qkv_net.weight.grad is not None:
qkv_net.weight.grad = qkv_weight_repeat(qkv_net.weight.grad, ratio)
def openai_compute(n_params, batch_size, training_steps):
# given in PF/s (hence the / 24 / 3600)
return 6 * n_params * batch_size * training_steps / 24 / 3600
def excluded_from_params(parameter: torch.nn.Parameter, vocab_size=-1):
return vocab_size in parameter.shape
def non_emb_param_count(model: torch.nn.Module, vocab_size=-1):
return sum(p.numel() for p in model.parameters() if not excluded_from_params(p, vocab_size))
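# Minimal usage sketch (added for illustration; the numbers and the toy model below
# are hypothetical and not part of the original training code).
if __name__ == "__main__":
    # ~124M parameters, batch size 512, 100k steps -> training compute in PF-days
    print(openai_compute(124_000_000, 512, 100_000))
    # Parameters whose shape contains vocab_size (here the 1000 x 16 embedding)
    # are excluded from the non-embedding parameter count.
    toy = torch.nn.Sequential(torch.nn.Embedding(1000, 16), torch.nn.Linear(16, 16))
    print(non_emb_param_count(toy, vocab_size=1000))  # 16 * 16 + 16 = 272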
| [
"torch.cat",
"torch.chunk"
] | 1.5.0 | TevenLeScao/transformer-xl | f15c845d83fe7952e841cda92e277919e8ec10fe |
1.5 | import torch
import torch.nn.functional as F
import torch.autograd
import a3t.diffai.helpers as h
def catNonNullErrors(op, ref_errs=None): # the way of things is ugly
def doop(er1, er2):
erS, erL = (er1, er2)
sS, sL = (erS.size()[0], erL.size()[0])
        if sS == sL:  # TODO: here we know we used transformers on either side which didn't introduce new error terms (this is a hack for hybrid zonotopes and doesn't work with adaptive error term adding).
return op(erS, erL)
if ref_errs is not None:
sz = ref_errs.size()[0]
else:
sz = min(sS, sL)
p1 = op(erS[:sz], erL[:sz])
erSrem = erS[sz:]
        erLrem = erL[sz:]
p2 = op(erSrem, h.zeros(erSrem.shape))
p3 = op(h.zeros(erLrem.shape), erLrem)
return torch.cat((p1, p2, p3), dim=0)
return doop
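# Illustrative note (added): when ref_errs is None, if one operand carries 3 error
# terms and the other 5, the first min(3, 5) = 3 terms are combined pairwise by `op`,
# and the remaining 2 terms of the larger operand are paired with zeros, so the
# result carries 3 + 0 + 2 = 5 error terms.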
def creluBoxy(dom):
if dom.errors is None:
if dom.beta is None:
return dom.new(F.relu(dom.head), None, None)
er = dom.beta
mx = F.relu(dom.head + er)
mn = F.relu(dom.head - er)
return dom.new((mn + mx) / 2, (mx - mn) / 2, None)
aber = torch.abs(dom.errors)
sm = torch.sum(aber, 0)
if not dom.beta is None:
sm += dom.beta
mx = dom.head + sm
mn = dom.head - sm
should_box = mn.lt(0) * mx.gt(0)
gtz = dom.head.gt(0).to_dtype()
mx /= 2
newhead = h.ifThenElse(should_box, mx, gtz * dom.head)
newbeta = h.ifThenElse(should_box, mx, gtz * (dom.beta if not dom.beta is None else 0))
newerr = (1 - should_box.to_dtype()) * gtz * dom.errors
return dom.new(newhead, newbeta, newerr)
def creluBoxySound(dom):
if dom.errors is None:
if dom.beta is None:
return dom.new(F.relu(dom.head), None, None)
er = dom.beta
mx = F.relu(dom.head + er)
mn = F.relu(dom.head - er)
return dom.new((mn + mx) / 2, (mx - mn) / 2 + 2e-6, None)
aber = torch.abs(dom.errors)
sm = torch.sum(aber, 0)
if not dom.beta is None:
sm += dom.beta
mx = dom.head + sm
mn = dom.head - sm
should_box = mn.lt(0) * mx.gt(0)
gtz = dom.head.gt(0).to_dtype()
mx /= 2
newhead = h.ifThenElse(should_box, mx, gtz * dom.head)
newbeta = h.ifThenElse(should_box, mx + 2e-6, gtz * (dom.beta if not dom.beta is None else 0))
newerr = (1 - should_box.to_dtype()) * gtz * dom.errors
return dom.new(newhead, newbeta, newerr)
def creluSwitch(dom):
if dom.errors is None:
if dom.beta is None:
return dom.new(F.relu(dom.head), None, None)
er = dom.beta
mx = F.relu(dom.head + er)
mn = F.relu(dom.head - er)
return dom.new((mn + mx) / 2, (mx - mn) / 2, None)
aber = torch.abs(dom.errors)
sm = torch.sum(aber, 0)
if not dom.beta is None:
sm += dom.beta
mn = dom.head - sm
mx = sm
mx += dom.head
should_box = mn.lt(0) * mx.gt(0)
gtz = dom.head.gt(0)
mn.neg_()
should_boxer = mn.gt(mx)
mn /= 2
newhead = h.ifThenElse(should_box, h.ifThenElse(should_boxer, mx / 2, dom.head + mn), gtz.to_dtype() * dom.head)
zbet = dom.beta if not dom.beta is None else 0
newbeta = h.ifThenElse(should_box, h.ifThenElse(should_boxer, mx / 2, mn + zbet), gtz.to_dtype() * zbet)
newerr = h.ifThenElseL(should_box, 1 - should_boxer, gtz).to_dtype() * dom.errors
return dom.new(newhead, newbeta, newerr)
def creluSmooth(dom):
if dom.errors is None:
if dom.beta is None:
return dom.new(F.relu(dom.head), None, None)
er = dom.beta
mx = F.relu(dom.head + er)
mn = F.relu(dom.head - er)
return dom.new((mn + mx) / 2, (mx - mn) / 2, None)
aber = torch.abs(dom.errors)
sm = torch.sum(aber, 0)
if not dom.beta is None:
sm += dom.beta
mn = dom.head - sm
mx = sm
mx += dom.head
nmn = F.relu(-1 * mn)
zbet = (dom.beta if not dom.beta is None else 0)
newheadS = dom.head + nmn / 2
newbetaS = zbet + nmn / 2
newerrS = dom.errors
mmx = F.relu(mx)
newheadB = mmx / 2
newbetaB = newheadB
newerrB = 0
eps = 0.0001
t = nmn / (mmx + nmn + eps) # mn.lt(0).to_dtype() * F.sigmoid(nmn - nmx)
shouldnt_zero = mx.gt(0).to_dtype()
newhead = shouldnt_zero * ((1 - t) * newheadS + t * newheadB)
newbeta = shouldnt_zero * ((1 - t) * newbetaS + t * newbetaB)
newerr = shouldnt_zero * ((1 - t) * newerrS + t * newerrB)
return dom.new(newhead, newbeta, newerr)
def creluNIPS(dom):
if dom.errors is None:
if dom.beta is None:
return dom.new(F.relu(dom.head), None, None)
er = dom.beta
mx = F.relu(dom.head + er)
mn = F.relu(dom.head - er)
return dom.new((mn + mx) / 2, (mx - mn) / 2, None)
sm = torch.sum(torch.abs(dom.errors), 0)
if not dom.beta is None:
sm += dom.beta
mn = dom.head - sm
mx = dom.head + sm
mngz = mn >= 0.0
zs = h.zeros(dom.head.shape)
diff = mx - mn
lam = torch.where((mx > 0) & (diff > 0.0), mx / diff, zs)
mu = lam * mn * (-0.5)
betaz = zs if dom.beta is None else dom.beta
newhead = torch.where(mngz, dom.head, lam * dom.head + mu)
mngz += diff <= 0.0
newbeta = torch.where(mngz, betaz, lam * betaz + mu) # mu is always positive on this side
newerr = torch.where(mngz, dom.errors, lam * dom.errors)
return dom.new(newhead, newbeta, newerr)
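# Note (added): the crossing-ReLU case above appears to follow the standard zonotope
# ReLU transformer: with bounds l < 0 < u it uses the slope lam = u / (u - l) and
# offset mu = -lam * l / 2, scales the existing error terms by lam, and adds mu to
# both the center and beta (the box term of the hybrid zonotope).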
class MaxTypes:
@staticmethod
def ub(x):
return x.ub()
@staticmethod
def only_beta(x):
return x.beta if x.beta is not None else x.head * 0
@staticmethod
def head_beta(x):
return MaxTypes.only_beta(x) + x.head
class HybridZonotope:
def isSafe(self, target):
od, _ = torch.min(h.preDomRes(self, target).lb(), 1)
return od.gt(0.0).long()
def isPoint(self):
return False
def labels(self):
target = torch.max(self.ub(), 1)[1]
l = list(h.preDomRes(self, target).lb()[0])
return [target.item()] + [i for i, v in zip(range(len(l)), l) if v <= 0]
def relu(self):
return self.customRelu(self)
def __init__(self, head, beta, errors, customRelu=creluBoxy, **kargs):
self.head = head
self.errors = errors
self.beta = beta
self.customRelu = creluBoxy if customRelu is None else customRelu
def new(self, *args, customRelu=None, **kargs):
return self.__class__(*args, **kargs,
customRelu=self.customRelu if customRelu is None else customRelu).checkSizes()
def zono_to_hybrid(self, *args, **kargs): # we are already a hybrid zono.
return self.new(self.head, self.beta, self.errors, **kargs)
def hybrid_to_zono(self, *args, correlate=True, customRelu=None, **kargs):
beta = self.beta
errors = self.errors
if correlate and beta is not None:
batches = beta.shape[0]
num_elem = h.product(beta.shape[1:])
ei = h.getEi(batches, num_elem)
if len(beta.shape) > 2:
ei = ei.contiguous().view(num_elem, *beta.shape)
err = ei * beta
errors = torch.cat((err, errors), dim=0) if errors is not None else err
beta = None
return Zonotope(self.head, beta, errors if errors is not None else (self.beta * 0).unsqueeze(0),
customRelu=self.customRelu if customRelu is None else None)
def abstractApplyLeaf(self, foo, *args, **kargs):
return getattr(self, foo)(*args, **kargs)
def decorrelate(self, cc_indx_batch_err): # keep these errors
if self.errors is None:
return self
batch_size = self.head.shape[0]
num_error_terms = self.errors.shape[0]
beta = h.zeros(self.head.shape).to_dtype() if self.beta is None else self.beta
errors = h.zeros([0] + list(self.head.shape)).to_dtype() if self.errors is None else self.errors
inds_i = torch.arange(self.head.shape[0], device=h.device).unsqueeze(1).long()
errors = errors.to_dtype().permute(1, 0, *list(range(len(self.errors.shape)))[2:])
sm = errors.clone()
sm[inds_i, cc_indx_batch_err] = 0
beta = beta.to_dtype() + sm.abs().sum(dim=1)
errors = errors[inds_i, cc_indx_batch_err]
errors = errors.permute(1, 0, *list(range(len(self.errors.shape)))[2:]).contiguous()
return self.new(self.head, beta, errors)
def dummyDecorrelate(self, num_decorrelate):
if num_decorrelate == 0 or self.errors is None:
return self
elif num_decorrelate >= self.errors.shape[0]:
beta = self.beta
if self.errors is not None:
errs = self.errors.abs().sum(dim=0)
if beta is None:
beta = errs
else:
beta += errs
return self.new(self.head, beta, None)
return None
def stochasticDecorrelate(self, num_decorrelate, choices=None, num_to_keep=False):
dummy = self.dummyDecorrelate(num_decorrelate)
if dummy is not None:
return dummy
num_error_terms = self.errors.shape[0]
batch_size = self.head.shape[0]
ucc_mask = h.ones([batch_size, self.errors.shape[0]]).long()
cc_indx_batch_err = h.cudify(torch.multinomial(ucc_mask.to_dtype(),
num_decorrelate if num_to_keep else num_error_terms - num_decorrelate,
replacement=False)) if choices is None else choices
return self.decorrelate(cc_indx_batch_err)
def decorrelateMin(self, num_decorrelate, num_to_keep=False):
dummy = self.dummyDecorrelate(num_decorrelate)
if dummy is not None:
return dummy
num_error_terms = self.errors.shape[0]
batch_size = self.head.shape[0]
error_sum_b_e = self.errors.abs().view(self.errors.shape[0], batch_size, -1).sum(dim=2).permute(1, 0)
cc_indx_batch_err = error_sum_b_e.topk(num_decorrelate if num_to_keep else num_error_terms - num_decorrelate)[1]
return self.decorrelate(cc_indx_batch_err)
def correlate(self, cc_indx_batch_beta): # given in terms of the flattened matrix.
num_correlate = h.product(cc_indx_batch_beta.shape[1:])
beta = h.zeros(self.head.shape).to_dtype() if self.beta is None else self.beta
errors = h.zeros([0] + list(self.head.shape)).to_dtype() if self.errors is None else self.errors
batch_size = beta.shape[0]
new_errors = h.zeros([num_correlate] + list(self.head.shape)).to_dtype()
inds_i = torch.arange(batch_size, device=h.device).unsqueeze(1).long()
nc = torch.arange(num_correlate, device=h.device).unsqueeze(1).long()
new_errors = new_errors.permute(1, 0, *list(range(len(new_errors.shape)))[2:]).contiguous().view(batch_size,
num_correlate,
-1)
new_errors[inds_i, nc.unsqueeze(0).expand([batch_size] + list(nc.shape)).squeeze(2), cc_indx_batch_beta] = \
beta.view(batch_size, -1)[inds_i, cc_indx_batch_beta]
new_errors = new_errors.permute(1, 0, *list(range(len(new_errors.shape)))[2:]).contiguous().view(num_correlate,
batch_size,
*beta.shape[
1:])
errors = torch.cat((errors, new_errors), dim=0)
beta.view(batch_size, -1)[inds_i, cc_indx_batch_beta] = 0
return self.new(self.head, beta, errors)
def stochasticCorrelate(self, num_correlate, choices=None):
if num_correlate == 0:
return self
domshape = self.head.shape
batch_size = domshape[0]
num_pixs = h.product(domshape[1:])
num_correlate = min(num_correlate, num_pixs)
ucc_mask = h.ones([batch_size, num_pixs]).long()
cc_indx_batch_beta = h.cudify(
torch.multinomial(ucc_mask.to_dtype(), num_correlate, replacement=False)) if choices is None else choices
return self.correlate(cc_indx_batch_beta)
def correlateMaxK(self, num_correlate):
if num_correlate == 0:
return self
domshape = self.head.shape
batch_size = domshape[0]
num_pixs = h.product(domshape[1:])
num_correlate = min(num_correlate, num_pixs)
concrete_max_image = self.ub().view(batch_size, -1)
cc_indx_batch_beta = concrete_max_image.topk(num_correlate)[1]
return self.correlate(cc_indx_batch_beta)
def correlateMaxPool(self, *args, max_type=MaxTypes.ub, max_pool=F.max_pool2d, **kargs):
domshape = self.head.shape
batch_size = domshape[0]
num_pixs = h.product(domshape[1:])
concrete_max_image = max_type(self)
cc_indx_batch_beta = max_pool(concrete_max_image, *args, return_indices=True, **kargs)[1].view(batch_size, -1)
return self.correlate(cc_indx_batch_beta)
def checkSizes(self):
if not self.errors is None:
if not self.errors.size()[1:] == self.head.size():
raise Exception("Such bad sizes on error:", self.errors.shape, " head:", self.head.shape)
if torch.isnan(self.errors).any():
raise Exception("Such nan in errors")
if not self.beta is None:
if not self.beta.size() == self.head.size():
raise Exception("Such bad sizes on beta")
if torch.isnan(self.beta).any():
raise Exception("Such nan in errors")
if self.beta.lt(0.0).any():
self.beta = self.beta.abs()
return self
def __mul__(self, flt):
return self.new(self.head * flt, None if self.beta is None else self.beta * abs(flt),
None if self.errors is None else self.errors * flt)
def __truediv__(self, flt):
flt = 1. / flt
return self.new(self.head * flt, None if self.beta is None else self.beta * abs(flt),
None if self.errors is None else self.errors * flt)
def __add__(self, other):
if isinstance(other, HybridZonotope):
return self.new(self.head + other.head, h.msum(self.beta, other.beta, lambda a, b: a + b),
h.msum(self.errors, other.errors, catNonNullErrors(lambda a, b: a + b)))
else:
# other has to be a standard variable or tensor
return self.new(self.head + other, self.beta, self.errors)
def addPar(self, a, b):
return self.new(a.head + b.head, h.msum(a.beta, b.beta, lambda a, b: a + b),
h.msum(a.errors, b.errors, catNonNullErrors(lambda a, b: a + b, self.errors)))
def __sub__(self, other):
if isinstance(other, HybridZonotope):
return self.new(self.head - other.head
, h.msum(self.beta, other.beta, lambda a, b: a + b)
, h.msum(self.errors, None if other.errors is None else -other.errors,
catNonNullErrors(lambda a, b: a + b)))
else:
# other has to be a standard variable or tensor
return self.new(self.head - other, self.beta, self.errors)
def bmm(self, other):
hd = self.head.bmm(other)
bet = None if self.beta is None else self.beta.bmm(other.abs())
if self.errors is None:
er = None
else:
er = self.errors.matmul(other)
return self.new(hd, bet, er)
def getBeta(self):
return self.head * 0 if self.beta is None else self.beta
def getErrors(self):
return (self.head * 0).unsqueeze(0) if self.beta is None else self.errors
def merge(self, other,
              ref=None):  # ref is usually None here; it is only set for parallel computation with the powerset domain
s_beta = self.getBeta() # so that beta is never none
sbox_u = self.head + s_beta
sbox_l = self.head - s_beta
o_u = other.ub()
o_l = other.lb()
o_in_s = (o_u <= sbox_u) & (o_l >= sbox_l)
s_err_mx = self.errors.abs().sum(dim=0)
if not isinstance(other, HybridZonotope):
new_head = (self.head + other.center()) / 2
new_beta = torch.max(sbox_u + s_err_mx, o_u) - new_head
return self.new(torch.where(o_in_s, self.head, new_head), torch.where(o_in_s, self.beta, new_beta),
o_in_s.float() * self.errors)
        # TODO: could be more efficient if one of these doesn't have beta or errors but that's okay for now.
s_u = sbox_u + s_err_mx
s_l = sbox_l - s_err_mx
obox_u = o_u - other.head
obox_l = o_l + other.head
s_in_o = (s_u <= obox_u) & (s_l >= obox_l)
# TODO: could theoretically still do something better when one is contained partially in the other
new_head = (self.head + other.center()) / 2
new_beta = torch.max(sbox_u + self.getErrors().abs().sum(dim=0), o_u) - new_head
return self.new(torch.where(o_in_s, self.head, torch.where(s_in_o, other.head, new_head))
, torch.where(o_in_s, s_beta, torch.where(s_in_o, other.getBeta(), new_beta))
, h.msum(o_in_s.float() * self.errors, s_in_o.float() * other.errors,
catNonNullErrors(lambda a, b: a + b,
ref_errs=ref.errors if ref is not None else ref))) # these are both zero otherwise
def conv(self, conv, weight, bias=None, **kargs):
h = self.errors
inter = h if h is None else h.view(-1, *h.size()[2:])
hd = conv(self.head, weight, bias=bias, **kargs)
res = h if h is None else conv(inter, weight, bias=None, **kargs)
return self.new(hd
, None if self.beta is None else conv(self.beta, weight.abs(), bias=None, **kargs)
, h if h is None else res.view(h.size()[0], h.size()[1], *res.size()[1:]))
def conv1d(self, *args, **kargs):
return self.conv(lambda x, *args, **kargs: x.conv1d(*args, **kargs), *args, **kargs)
def conv2d(self, *args, **kargs):
return self.conv(lambda x, *args, **kargs: x.conv2d(*args, **kargs), *args, **kargs)
def conv3d(self, *args, **kargs):
return self.conv(lambda x, *args, **kargs: x.conv3d(*args, **kargs), *args, **kargs)
def conv_transpose1d(self, *args, **kargs):
return self.conv(lambda x, *args, **kargs: x.conv_transpose1d(*args, **kargs), *args, **kargs)
def conv_transpose2d(self, *args, **kargs):
return self.conv(lambda x, *args, **kargs: x.conv_transpose2d(*args, **kargs), *args, **kargs)
def conv_transpose3d(self, *args, **kargs):
return self.conv(lambda x, *args, **kargs: x.conv_transpose3d(*args, **kargs), *args, **kargs)
def matmul(self, other):
return self.new(self.head.matmul(other), None if self.beta is None else self.beta.matmul(other.abs()),
None if self.errors is None else self.errors.matmul(other))
def unsqueeze(self, i):
return self.new(self.head.unsqueeze(i), None if self.beta is None else self.beta.unsqueeze(i),
None if self.errors is None else self.errors.unsqueeze(i + 1))
def squeeze(self, dim):
return self.new(self.head.squeeze(dim),
None if self.beta is None else self.beta.squeeze(dim),
None if self.errors is None else self.errors.squeeze(dim + 1 if dim >= 0 else dim))
def double(self):
return self.new(self.head.double(), self.beta.double() if self.beta is not None else None,
self.errors.double() if self.errors is not None else None)
def float(self):
return self.new(self.head.float(), self.beta.float() if self.beta is not None else None,
self.errors.float() if self.errors is not None else None)
def to_dtype(self):
return self.new(self.head.to_dtype(), self.beta.to_dtype() if self.beta is not None else None,
self.errors.to_dtype() if self.errors is not None else None)
def sum(self, dim=1):
return self.new(torch.sum(self.head, dim=dim), None if self.beta is None else torch.sum(self.beta, dim=dim),
None if self.errors is None else torch.sum(self.errors, dim=dim + 1 if dim >= 0 else dim))
def view(self, *newshape):
return self.new(self.head.view(*newshape),
None if self.beta is None else self.beta.view(*newshape),
None if self.errors is None else self.errors.view(self.errors.size()[0], *newshape))
def gather(self, dim, index):
return self.new(self.head.gather(dim, index),
None if self.beta is None else self.beta.gather(dim, index),
None if self.errors is None else self.errors.gather(dim + 1, index.expand(
[self.errors.size()[0]] + list(index.size()))))
def concretize(self):
if self.errors is None:
return self
return self.new(self.head, torch.sum(self.concreteErrors().abs(), 0), None) # maybe make a box?
def cat(self, other, dim=0):
return self.new(self.head.cat(other.head, dim=dim),
h.msum(other.beta, self.beta, lambda a, b: a.cat(b, dim=dim)),
h.msum(self.errors, other.errors, catNonNullErrors(lambda a, b: a.cat(b, dim + 1))))
def split(self, split_size, dim=0):
heads = list(self.head.split(split_size, dim))
betas = list(self.beta.split(split_size, dim)) if not self.beta is None else None
errorss = list(self.errors.split(split_size, dim + 1)) if not self.errors is None else None
def makeFromI(i):
return self.new(heads[i],
None if betas is None else betas[i],
None if errorss is None else errorss[i])
return tuple(makeFromI(i) for i in range(len(heads)))
def concreteErrors(self):
if self.beta is None and self.errors is None:
raise Exception("shouldn't have both beta and errors be none")
if self.errors is None:
return self.beta.unsqueeze(0)
if self.beta is None:
return self.errors
return torch.cat([self.beta.unsqueeze(0), self.errors], dim=0)
def applyMonotone(self, foo, *args, **kargs):
if self.beta is None and self.errors is None:
return self.new(foo(self.head), None, None)
beta = self.concreteErrors().abs().sum(dim=0)
tp = foo(self.head + beta, *args, **kargs)
bt = foo(self.head - beta, *args, **kargs)
new_hybrid = self.new((tp + bt) / 2, (tp - bt) / 2, None)
if self.errors is not None:
return new_hybrid.correlateMaxK(self.errors.shape[0])
return new_hybrid
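    # Note (added): this relies on foo being monotone increasing, so the exact output
    # range of the box [head - beta, head + beta] is [foo(lower), foo(upper)]; elu,
    # selu and sigm below reuse this helper, and correlateMaxK re-introduces error
    # terms so the result stays a (hybrid) zonotope rather than a pure box.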
def avg_pool2d(self, *args, **kargs):
nhead = F.avg_pool2d(self.head, *args, **kargs)
return self.new(nhead,
None if self.beta is None else F.avg_pool2d(self.beta, *args, **kargs),
None if self.errors is None else F.avg_pool2d(self.errors.view(-1, *self.head.shape[1:]), *args,
**kargs).view(-1, *nhead.shape))
def adaptive_avg_pool2d(self, *args, **kargs):
nhead = F.adaptive_avg_pool2d(self.head, *args, **kargs)
return self.new(nhead,
None if self.beta is None else F.adaptive_avg_pool2d(self.beta, *args, **kargs),
None if self.errors is None else F.adaptive_avg_pool2d(
self.errors.view(-1, *self.head.shape[1:]), *args, **kargs).view(-1, *nhead.shape))
def elu(self):
return self.applyMonotone(F.elu)
def selu(self):
return self.applyMonotone(F.selu)
def sigm(self):
return self.applyMonotone(F.sigmoid)
def softplus(self):
if self.errors is None:
if self.beta is None:
return self.new(F.softplus(self.head), None, None)
tp = F.softplus(self.head + self.beta)
bt = F.softplus(self.head - self.beta)
return self.new((tp + bt) / 2, (tp - bt) / 2, None)
errors = self.concreteErrors()
o = h.ones(self.head.size())
def sp(hd):
return F.softplus(hd) # torch.log(o + torch.exp(hd)) # not very stable
def spp(hd):
ehd = torch.exp(hd)
return ehd.div(ehd + o)
def sppp(hd):
ehd = torch.exp(hd)
md = ehd + o
return ehd.div(md.mul(md))
fa = sp(self.head)
fpa = spp(self.head)
a = self.head
k = torch.sum(errors.abs(), 0)
def evalG(r):
return r.mul(r).mul(sppp(a + r))
m = torch.max(evalG(h.zeros(k.size())), torch.max(evalG(k), evalG(-k)))
m = h.ifThenElse(a.abs().lt(k), torch.max(m, torch.max(evalG(a), evalG(-a))), m)
m /= 2
return self.new(fa, m if self.beta is None else m + self.beta.mul(fpa),
None if self.errors is None else self.errors.mul(fpa))
def center(self):
return self.head
def vanillaTensorPart(self):
return self.head
def lb(self):
return self.head - self.concreteErrors().abs().sum(dim=0)
def ub(self):
return self.head + self.concreteErrors().abs().sum(dim=0)
def size(self):
return self.head.size()
def diameter(self):
abal = torch.abs(self.concreteErrors()).transpose(0, 1)
return abal.sum(1).sum(1) # perimeter
def loss(self, target, **args):
r = -h.preDomRes(self, target).lb()
return F.softplus(r.max(1)[0])
def deep_loss(self, act=F.relu, *args, **kargs):
batch_size = self.head.shape[0]
inds = torch.arange(batch_size, device=h.device).unsqueeze(1).long()
def dl(l, u):
ls, lsi = torch.sort(l, dim=1)
ls_u = u[inds, lsi]
def slidingMax(a): # using maxpool
k = a.shape[1]
ml = a.min(dim=1)[0].unsqueeze(1)
inp = torch.cat((h.zeros([batch_size, k]), a - ml), dim=1)
mpl = F.max_pool1d(inp.unsqueeze(1), kernel_size=k, stride=1, padding=0, return_indices=False).squeeze(
1)
return mpl[:, :-1] + ml
return act(slidingMax(ls_u) - ls).sum(dim=1)
l = self.lb().view(batch_size, -1)
u = self.ub().view(batch_size, -1)
return (dl(l, u) + dl(-u, -l)) / (2 * l.shape[1]) # make it easier to regularize against
class Zonotope(HybridZonotope):
def applySuper(self, ret):
batches = ret.head.size()[0]
num_elem = h.product(ret.head.size()[1:])
ei = h.getEi(batches, num_elem)
if len(ret.head.size()) > 2:
ei = ei.contiguous().view(num_elem, *ret.head.size())
ret.errors = torch.cat((ret.errors, ei * ret.beta)) if not ret.beta is None else ret.errors
ret.beta = None
return ret.checkSizes()
def zono_to_hybrid(self, *args, customRelu=None, **kargs): # we are already a hybrid zono.
return HybridZonotope(self.head, self.beta, self.errors,
customRelu=self.customRelu if customRelu is None else customRelu)
def hybrid_to_zono(self, *args, **kargs):
return self.new(self.head, self.beta, self.errors, **kargs)
def applyMonotone(self, *args, **kargs):
return self.applySuper(super(Zonotope, self).applyMonotone(*args, **kargs))
def softplus(self):
return self.applySuper(super(Zonotope, self).softplus())
def relu(self):
return self.applySuper(super(Zonotope, self).relu())
def splitRelu(self, *args, **kargs):
return [self.applySuper(a) for a in super(Zonotope, self).splitRelu(*args, **kargs)]
def mysign(x):
e = x.eq(0).to_dtype()
r = x.sign().to_dtype()
return r + e
def mulIfEq(grad, out, target):
pred = out.max(1, keepdim=True)[1]
is_eq = pred.eq(target.view_as(pred)).to_dtype()
is_eq = is_eq.view([-1] + [1 for _ in grad.size()[1:]]).expand_as(grad)
return is_eq
def stdLoss(out, target):
if torch.__version__[0] == "0":
return F.cross_entropy(out, target, reduce=False)
else:
return F.cross_entropy(out, target, reduction='none')
class ListDomain(object):
def __init__(self, al, *args, **kargs):
self.al = list(al)
def new(self, *args, **kargs):
return self.__class__(*args, **kargs)
def isSafe(self, *args, **kargs):
raise Exception("Domain Not Suitable For Testing")
def labels(self):
raise Exception("Domain Not Suitable For Testing")
def isPoint(self):
return all(a.isPoint() for a in self.al)
def __mul__(self, flt):
return self.new(a.__mul__(flt) for a in self.al)
def __truediv__(self, flt):
return self.new(a.__truediv__(flt) for a in self.al)
def __add__(self, other):
if isinstance(other, ListDomain):
return self.new(a.__add__(o) for a, o in zip(self.al, other.al))
else:
return self.new(a.__add__(other) for a in self.al)
def merge(self, other, ref=None):
if ref is None:
return self.new(a.merge(o) for a, o in zip(self.al, other.al))
return self.new(a.merge(o, ref=r) for a, o, r in zip(self.al, other.al, ref.al))
def addPar(self, a, b):
return self.new(s.addPar(av, bv) for s, av, bv in zip(self.al, a.al, b.al))
def __sub__(self, other):
if isinstance(other, ListDomain):
return self.new(a.__sub__(o) for a, o in zip(self.al, other.al))
else:
return self.new(a.__sub__(other) for a in self.al)
def abstractApplyLeaf(self, *args, **kargs):
return self.new(a.abstractApplyLeaf(*args, **kargs) for a in self.al)
def bmm(self, other):
return self.new(a.bmm(other) for a in self.al)
def matmul(self, other):
return self.new(a.matmul(other) for a in self.al)
def conv(self, *args, **kargs):
return self.new(a.conv(*args, **kargs) for a in self.al)
def conv1d(self, *args, **kargs):
return self.new(a.conv1d(*args, **kargs) for a in self.al)
def conv2d(self, *args, **kargs):
return self.new(a.conv2d(*args, **kargs) for a in self.al)
def conv3d(self, *args, **kargs):
return self.new(a.conv3d(*args, **kargs) for a in self.al)
def max_pool2d(self, *args, **kargs):
return self.new(a.max_pool2d(*args, **kargs) for a in self.al)
def avg_pool2d(self, *args, **kargs):
return self.new(a.avg_pool2d(*args, **kargs) for a in self.al)
def adaptive_avg_pool2d(self, *args, **kargs):
return self.new(a.adaptive_avg_pool2d(*args, **kargs) for a in self.al)
def unsqueeze(self, *args, **kargs):
return self.new(a.unsqueeze(*args, **kargs) for a in self.al)
def squeeze(self, *args, **kargs):
return self.new(a.squeeze(*args, **kargs) for a in self.al)
def view(self, *args, **kargs):
return self.new(a.view(*args, **kargs) for a in self.al)
def gather(self, *args, **kargs):
return self.new(a.gather(*args, **kargs) for a in self.al)
def sum(self, *args, **kargs):
return self.new(a.sum(*args, **kargs) for a in self.al)
def double(self):
return self.new(a.double() for a in self.al)
def float(self):
return self.new(a.float() for a in self.al)
def to_dtype(self):
return self.new(a.to_dtype() for a in self.al)
def vanillaTensorPart(self):
return self.al[0].vanillaTensorPart()
def center(self):
return self.new(a.center() for a in self.al)
def ub(self):
return self.new(a.ub() for a in self.al)
def lb(self):
return self.new(a.lb() for a in self.al)
def relu(self):
return self.new(a.relu() for a in self.al)
def splitRelu(self, *args, **kargs):
return self.new(a.splitRelu(*args, **kargs) for a in self.al)
def softplus(self):
return self.new(a.softplus() for a in self.al)
def elu(self):
return self.new(a.elu() for a in self.al)
def selu(self):
return self.new(a.selu() for a in self.al)
def sigm(self):
return self.new(a.sigm() for a in self.al)
def cat(self, other, *args, **kargs):
return self.new(a.cat(o, *args, **kargs) for a, o in zip(self.al, other.al))
def split(self, *args, **kargs):
        return [self.new(z) for z in zip(*(a.split(*args, **kargs) for a in self.al))]
def size(self):
return self.al[0].size()
def loss(self, *args, **kargs):
return sum(a.loss(*args, **kargs) for a in self.al)
def deep_loss(self, *args, **kargs):
return sum(a.deep_loss(*args, **kargs) for a in self.al)
def checkSizes(self):
for a in self.al:
a.checkSizes()
return self
class TaggedDomain(object):
def __init__(self, a, tag=None):
self.tag = tag
self.a = a
def isSafe(self, *args, **kargs):
return self.a.isSafe(*args, **kargs)
def isPoint(self):
return self.a.isPoint()
def labels(self):
raise Exception("Domain Not Suitable For Testing")
def __mul__(self, flt):
return TaggedDomain(self.a.__mul__(flt), self.tag)
def __truediv__(self, flt):
return TaggedDomain(self.a.__truediv__(flt), self.tag)
def __add__(self, other):
if isinstance(other, TaggedDomain):
return TaggedDomain(self.a.__add__(other.a), self.tag)
else:
return TaggedDomain(self.a.__add__(other), self.tag)
def addPar(self, a, b):
return TaggedDomain(self.a.addPar(a.a, b.a), self.tag)
def __sub__(self, other):
if isinstance(other, TaggedDomain):
return TaggedDomain(self.a.__sub__(other.a), self.tag)
else:
return TaggedDomain(self.a.__sub__(other), self.tag)
def bmm(self, other):
return TaggedDomain(self.a.bmm(other), self.tag)
def matmul(self, other):
return TaggedDomain(self.a.matmul(other), self.tag)
def conv(self, *args, **kargs):
return TaggedDomain(self.a.conv(*args, **kargs), self.tag)
def conv1d(self, *args, **kargs):
return TaggedDomain(self.a.conv1d(*args, **kargs), self.tag)
def conv2d(self, *args, **kargs):
return TaggedDomain(self.a.conv2d(*args, **kargs), self.tag)
def conv3d(self, *args, **kargs):
return TaggedDomain(self.a.conv3d(*args, **kargs), self.tag)
def max_pool2d(self, *args, **kargs):
return TaggedDomain(self.a.max_pool2d(*args, **kargs), self.tag)
def avg_pool2d(self, *args, **kargs):
return TaggedDomain(self.a.avg_pool2d(*args, **kargs), self.tag)
def adaptive_avg_pool2d(self, *args, **kargs):
return TaggedDomain(self.a.adaptive_avg_pool2d(*args, **kargs), self.tag)
def unsqueeze(self, *args, **kargs):
return TaggedDomain(self.a.unsqueeze(*args, **kargs), self.tag)
def squeeze(self, *args, **kargs):
return TaggedDomain(self.a.squeeze(*args, **kargs), self.tag)
def abstractApplyLeaf(self, *args, **kargs):
return TaggedDomain(self.a.abstractApplyLeaf(*args, **kargs), self.tag)
def view(self, *args, **kargs):
return TaggedDomain(self.a.view(*args, **kargs), self.tag)
def gather(self, *args, **kargs):
return TaggedDomain(self.a.gather(*args, **kargs), self.tag)
def sum(self, *args, **kargs):
return TaggedDomain(self.a.sum(*args, **kargs), self.tag)
def double(self):
return TaggedDomain(self.a.double(), self.tag)
def float(self):
return TaggedDomain(self.a.float(), self.tag)
def to_dtype(self):
return TaggedDomain(self.a.to_dtype(), self.tag)
def vanillaTensorPart(self):
return self.a.vanillaTensorPart()
def center(self):
return TaggedDomain(self.a.center(), self.tag)
def ub(self):
return TaggedDomain(self.a.ub(), self.tag)
def lb(self):
return TaggedDomain(self.a.lb(), self.tag)
def relu(self):
return TaggedDomain(self.a.relu(), self.tag)
def splitRelu(self, *args, **kargs):
return TaggedDomain(self.a.splitRelu(*args, **kargs), self.tag)
def diameter(self):
return self.a.diameter()
def softplus(self):
return TaggedDomain(self.a.softplus(), self.tag)
def elu(self):
return TaggedDomain(self.a.elu(), self.tag)
def selu(self):
return TaggedDomain(self.a.selu(), self.tag)
def sigm(self):
return TaggedDomain(self.a.sigm(), self.tag)
def cat(self, other, *args, **kargs):
return TaggedDomain(self.a.cat(other.a, *args, **kargs), self.tag)
def split(self, *args, **kargs):
return [TaggedDomain(z, self.tag) for z in self.a.split(*args, **kargs)]
def size(self):
return self.a.size()
def loss(self, *args, **kargs):
return self.tag.loss(self.a, *args, **kargs)
def deep_loss(self, *args, **kargs):
return self.a.deep_loss(*args, **kargs)
def checkSizes(self):
self.a.checkSizes()
return self
def merge(self, other, ref=None):
return TaggedDomain(self.a.merge(other.a, ref=None if ref is None else ref.a), self.tag)
class ListConjDomain(ListDomain):
def __init__(self, al, *args, **kargs):
        super(ListConjDomain, self).__init__(al, *args, **kargs)
def isSafe(self, target):
assert len(self.al) > 0
od, _ = torch.min(h.preDomRes(self.al[0], target).lb(), 1)
for a in self.al[1:]:
od1, _ = torch.min(h.preDomRes(a, target).lb(), 1)
od = torch.max(od, od1)
return od.gt(0.0).long()
def labels(self):
raise NotImplementedError()
def loss(self, target):
r = self.al[0].ub()
inds = torch.arange(r.shape[0], device=h.device, dtype=h.ltype)
for a in self.al[1:]:
r1 = a.ub()
r = torch.min(r, r1)
t = self.al[0].lb()[inds, target]
for a in self.al[1:]:
t1 = a.lb()[inds, target]
t = torch.max(t, t1)
r[inds, target] = t
return r.loss(target)
class ListDisjDomain(ListDomain):
def __init__(self, al, *args, **kargs):
        super(ListDisjDomain, self).__init__(al, *args, **kargs)
def isSafe(self, target):
assert len(self.al) > 0
od, _ = torch.min(h.preDomRes(self.al[0], target).lb(), 1)
for a in self.al[1:]:
od1, _ = torch.min(h.preDomRes(a, target).lb(), 1)
od = torch.min(od, od1)
return od.gt(0.0).long()
def labels(self):
raise NotImplementedError()
def loss(self, target):
assert len(self.al) > 0
r = self.al[0].ub()
inds = torch.arange(r.shape[0], device=h.device, dtype=h.ltype)
for a in self.al[1:]:
r1 = a.ub()
r = torch.max(r, r1)
t = self.al[0].lb()[inds, target]
for a in self.al[1:]:
t1 = a.lb()[inds, target]
t = torch.min(t, t1)
r[inds, target] = t
return r.loss(target)
class LabeledDomain(object):
def __init__(self, label):
self.label = label
def box(self, o):
self.o = o
def to_dtype(self):
return self
def center(self):
return self.o
| [
"torch.cat",
"torch.nn.functional.avg_pool2d",
"torch.nn.functional.softplus",
"torch.min",
"torch.arange",
"torch.max",
"torch.isnan",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.abs",
"torch.nn.functional.cross_entropy",
"torch.nn.functional.relu",
"torch.exp",
"torch.sort",
"torch.where",
"torch.sum"
] | 1.5 | ForeverZyh/NLP_training_framework | d5cc28bf389a3f57439a5650a614e341ea333bb8 |
1.10 | # coding=utf-8
# Copyright (c) DIRECT Contributors
"""MRI model engine of DIRECT."""
import gc
import pathlib
import time
from abc import abstractmethod
from collections import defaultdict
from os import PathLike
from typing import Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
import direct.data.transforms as T
from direct.config import BaseConfig
from direct.engine import DoIterationOutput, Engine
from direct.functionals import SSIMLoss
from direct.utils import communication, merge_list_of_dicts, multiply_function, reduce_list_of_dicts
from direct.utils.communication import reduce_tensor_dict
def _crop_volume(
source: torch.Tensor, target: torch.Tensor, resolution: Union[List[int], Tuple[int, ...]]
) -> Tuple[torch.Tensor, torch.Tensor]:
"""2D source/target cropper.
Parameters
----------
source: torch.Tensor
Has shape (batch, height, width)
target: torch.Tensor
Has shape (batch, height, width)
resolution: list of ints or tuple of ints
Target resolution.
Returns
-------
(torch.Tensor, torch.Tensor)
"""
if not resolution or all(_ == 0 for _ in resolution):
return source.unsqueeze(1), target.unsqueeze(1) # Added channel dimension.
source_abs = T.center_crop(source, resolution).unsqueeze(1) # Added channel dimension.
target_abs = T.center_crop(target, resolution).unsqueeze(1) # Added channel dimension.
return source_abs, target_abs
class MRIModelEngine(Engine):
"""Engine for MRI models.
Each child class should implement their own :meth:`_do_iteration` method.
"""
def __init__(
self,
cfg: BaseConfig,
model: nn.Module,
device: str,
forward_operator: Optional[Callable] = None,
backward_operator: Optional[Callable] = None,
mixed_precision: bool = False,
**models: nn.Module,
):
"""Inits :class:`MRIModelEngine`.
Parameters
----------
cfg: BaseConfig
Configuration file.
model: nn.Module
Model.
device: str
Device. Can be "cuda" or "cpu".
forward_operator: Callable, optional
The forward operator. Default: None.
backward_operator: Callable, optional
The backward operator. Default: None.
mixed_precision: bool
Use mixed precision. Default: False.
**models: nn.Module
Additional models.
"""
super().__init__(
cfg,
model,
device,
forward_operator=forward_operator,
backward_operator=backward_operator,
mixed_precision=mixed_precision,
**models,
)
self._complex_dim = -1
self._coil_dim = 1
@abstractmethod
def _do_iteration(
self,
data: Dict[str, torch.Tensor],
loss_fns: Optional[Dict[str, Callable]] = None,
regularizer_fns: Optional[Dict[str, Callable]] = None,
) -> DoIterationOutput:
"""To be implemented by child class.
Should output a :meth:`DoIterationOutput` object with `output_image`, `sensitivity_map` and
`data_dict` attributes.
"""
def build_loss(self) -> Dict:
# TODO: Cropper is a processing output tool.
def get_resolution(**data):
"""Be careful that this will use the cropping size of the FIRST sample in the batch."""
return _compute_resolution(self.cfg.training.loss.crop, data.get("reconstruction_size", None))
# TODO(jt) Ideally this is also configurable:
# - Do in steps (use insertation order)
# Crop -> then loss.
def l1_loss(source: torch.Tensor, reduction: str = "mean", **data) -> torch.Tensor:
"""Calculate L1 loss given source and target.
Parameters
----------
source: torch.Tensor
Has shape (batch, [complex=2,] height, width)
reduction: str
Reduction type. Can be "sum" or "mean".
data: Dict[str, torch.Tensor]
Contains key "target" with value a tensor of shape (batch, height, width)
Returns
-------
l1_loss: torch.Tensor
L1 loss.
"""
resolution = get_resolution(**data)
l1_loss = F.l1_loss(
*_crop_volume(
T.modulus_if_complex(source, complex_axis=self._complex_dim), data["target"], resolution
),
reduction=reduction,
)
return l1_loss
def l2_loss(source: torch.Tensor, reduction: str = "mean", **data) -> torch.Tensor:
"""Calculate L2 loss (MSE) given source and target.
Parameters
----------
source: torch.Tensor
Has shape (batch, [complex=2,] height, width)
reduction: str
Reduction type. Can be "sum" or "mean".
data: Dict[str, torch.Tensor]
Contains key "target" with value a tensor of shape (batch, height, width)
Returns
-------
l2_loss: torch.Tensor
L2 loss.
"""
resolution = get_resolution(**data)
l2_loss = F.mse_loss(
*_crop_volume(
T.modulus_if_complex(source, complex_axis=self._complex_dim), data["target"], resolution
),
reduction=reduction,
)
return l2_loss
def ssim_loss(source: torch.Tensor, reduction: str = "mean", **data) -> torch.Tensor:
"""Calculate SSIM loss given source and target.
Parameters
----------
source: torch.Tensor
Has shape (batch, [complex=2,] height, width)
reduction: str
Reduction type. Can be "sum" or "mean".
data: Dict[str, torch.Tensor]
Contains key "target" with value a tensor of shape (batch, height, width)
Returns
-------
ssim_loss: torch.Tensor
SSIM loss.
"""
resolution = get_resolution(**data)
if reduction != "mean":
raise AssertionError(
f"SSIM loss can only be computed with reduction == 'mean'." f" Got reduction == {reduction}."
)
source_abs, target_abs = _crop_volume(
T.modulus_if_complex(source, complex_axis=self._complex_dim), data["target"], resolution
)
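            # Use the maximum of the cropped target as the data range for SSIM.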
data_range = torch.tensor([target_abs.max()], device=target_abs.device)
ssim_loss = SSIMLoss().to(source_abs.device).forward(source_abs, target_abs, data_range=data_range)
return ssim_loss
# Build losses
loss_dict = {}
for curr_loss in self.cfg.training.loss.losses: # type: ignore
loss_fn = curr_loss.function
if loss_fn == "l1_loss":
loss_dict[loss_fn] = multiply_function(curr_loss.multiplier, l1_loss)
elif loss_fn == "l2_loss":
loss_dict[loss_fn] = multiply_function(curr_loss.multiplier, l2_loss)
elif loss_fn == "ssim_loss":
loss_dict[loss_fn] = multiply_function(curr_loss.multiplier, ssim_loss)
else:
raise ValueError(f"{loss_fn} not permissible.")
return loss_dict
def compute_sensitivity_map(self, sensitivity_map: torch.Tensor) -> torch.Tensor:
"""Computes sensitivity maps :math:`\{S^k\}_{k=1}^{n_c}` if `sensitivity_model` is available.
:math:`\{S^k\}_{k=1}^{n_c}` are normalized such that
.. math::
\sum_{k=1}^{n_c}S^k {S^k}^* = I.
Parameters
----------
sensitivity_map: torch.Tensor
Sensitivity maps of shape (batch, coil, height, width, complex=2).
Returns
-------
sensitivity_map: torch.Tensor
Normalized and refined sensitivity maps of shape (batch, coil, height, width, complex=2).
"""
# Some things can be done with the sensitivity map here, e.g. apply a u-net
if "sensitivity_model" in self.models:
# Move channels to first axis
sensitivity_map = sensitivity_map.permute(
(0, 1, 4, 2, 3)
) # shape (batch, coil, complex=2, height, width)
sensitivity_map = self.compute_model_per_coil("sensitivity_model", sensitivity_map).permute(
(0, 1, 3, 4, 2)
) # has channel last: shape (batch, coil, height, width, complex=2)
        # Normalize the sensitivity map such that \sum_{k \in coils} S^k (S^k)^* = 1.
sensitivity_map_norm = torch.sqrt(
((sensitivity_map**2).sum(self._complex_dim)).sum(self._coil_dim)
) # shape (batch, height, width)
sensitivity_map_norm = sensitivity_map_norm.unsqueeze(self._coil_dim).unsqueeze(self._complex_dim)
return T.safe_divide(sensitivity_map, sensitivity_map_norm)
@torch.no_grad()
def reconstruct_volumes( # type: ignore
self,
data_loader: DataLoader,
loss_fns: Optional[Dict[str, Callable]] = None,
regularizer_fns: Optional[Dict[str, Callable]] = None,
add_target: bool = True,
crop: Optional[str] = None,
):
"""Validation process. Assumes that each batch only contains slices of the same volume *AND* that these are
sequentially ordered.
Parameters
----------
data_loader: DataLoader
loss_fns: Dict[str, Callable], optional
regularizer_fns: Dict[str, Callable], optional
add_target: bool
If true, will add the target to the output
crop: str, optional
Crop type.
Yields
------
(curr_volume, [curr_target,] loss_dict_list, filename): torch.Tensor, [torch.Tensor,], dict, pathlib.Path
# TODO(jt): visualization should be a namedtuple or a dict or so
"""
# pylint: disable=too-many-locals, arguments-differ
self.models_to_device()
self.models_validation_mode()
torch.cuda.empty_cache()
# Let us inspect this data
all_filenames = list(data_loader.dataset.volume_indices.keys()) # type: ignore
num_for_this_process = len(list(data_loader.batch_sampler.sampler.volume_indices.keys())) # type: ignore
self.logger.info(
"Reconstructing a total of %s volumes. This process has %s volumes (world size: %s).",
len(all_filenames),
num_for_this_process,
communication.get_world_size(),
)
last_filename = None # At the start of evaluation, there are no filenames.
curr_volume = None
curr_target = None
slice_counter = 0
filenames_seen = 0
# Loop over dataset. This requires the use of direct.data.sampler.DistributedSequentialSampler as this sampler
# splits the data over the different processes, and outputs the slices linearly. The implicit assumption here is
# that the slices are outputted from the Dataset *sequentially* for each volume one by one, and each batch only
# contains data from one volume.
time_start = time.time()
loss_dict_list = []
# TODO: Use iter_idx to keep track of volume
for _, data in enumerate(data_loader):
torch.cuda.empty_cache()
gc.collect()
filename = _get_filename_from_batch(data)
if last_filename is None:
last_filename = filename # First iteration last_filename is not set.
if last_filename != filename:
curr_volume = None
curr_target = None
slice_counter = 0
last_filename = filename
scaling_factors = data["scaling_factor"].clone()
resolution = _compute_resolution(
key=crop,
reconstruction_size=data.get("reconstruction_size", None),
)
# Compute output
iteration_output = self._do_iteration(data, loss_fns=loss_fns, regularizer_fns=regularizer_fns)
output = iteration_output.output_image
loss_dict = iteration_output.data_dict
# Output can be complex-valued, and has to be cropped. This holds for both output and target.
output_abs = _process_output(
output,
scaling_factors,
resolution=resolution,
complex_axis=self._complex_dim,
)
if add_target:
target_abs = _process_output(
data["target"],
scaling_factors,
resolution=resolution,
complex_axis=self._complex_dim,
)
if curr_volume is None:
volume_size = len(data_loader.batch_sampler.sampler.volume_indices[filename]) # type: ignore
curr_volume = torch.zeros(*(volume_size, *output_abs.shape[1:]), dtype=output_abs.dtype)
loss_dict_list.append(loss_dict)
if add_target:
curr_target = curr_volume.clone()
curr_volume[slice_counter : slice_counter + output_abs.shape[0], ...] = output_abs.cpu()
if add_target:
curr_target[slice_counter : slice_counter + output_abs.shape[0], ...] = target_abs.cpu() # type: ignore
slice_counter += output_abs.shape[0]
# Check if we had the last batch
if slice_counter == volume_size:
filenames_seen += 1
self.logger.info(
"%i of %i volumes reconstructed: %s (shape = %s) in %.3fs.",
filenames_seen,
num_for_this_process,
last_filename,
list(curr_volume.shape),
time.time() - time_start,
)
# Maybe not needed.
del data
yield (curr_volume, curr_target, reduce_list_of_dicts(loss_dict_list), filename) if add_target else (
curr_volume,
reduce_list_of_dicts(loss_dict_list),
filename,
)
@torch.no_grad()
def evaluate( # type: ignore
self,
data_loader: DataLoader,
loss_fns: Optional[Dict[str, Callable]],
):
"""Validation process.
Assumes that each batch only contains slices of the same volume *AND* that these are sequentially ordered.
Parameters
----------
data_loader: DataLoader
loss_fns: Dict[str, Callable], optional
Returns
-------
loss_dict, all_gathered_metrics, visualize_slices, visualize_target
"""
# TODO(jt): visualization should be a namedtuple or a dict or so
# TODO(gy): Implement visualization of extra keys. E.g. sensitivity_map.
# pylint: disable=arguments-differ, too-many-locals
self.models_to_device()
self.models_validation_mode()
torch.cuda.empty_cache()
volume_metrics = self.build_metrics(self.cfg.validation.metrics) # type: ignore
val_losses = []
val_volume_metrics: Dict[PathLike, Dict] = defaultdict(dict)
        # Container for the slices which can be visualized in TensorBoard.
visualize_slices: List[np.ndarray] = []
visualize_target: List[np.ndarray] = []
for _, output in enumerate(
self.reconstruct_volumes(
data_loader, loss_fns=loss_fns, add_target=True, crop=self.cfg.validation.crop # type: ignore
)
):
volume, target, volume_loss_dict, filename = output
curr_metrics = {
metric_name: metric_fn(target, volume).clone() for metric_name, metric_fn in volume_metrics.items()
}
curr_metrics_string = ", ".join([f"{x}: {float(y)}" for x, y in curr_metrics.items()])
self.logger.info("Metrics for %s: %s", filename, curr_metrics_string)
# TODO: Path can be tricky if it is not unique (e.g. image.h5)
val_volume_metrics[filename.name] = curr_metrics
val_losses.append(volume_loss_dict)
# Log the center slice of the volume
if len(visualize_slices) < self.cfg.logging.tensorboard.num_images: # type: ignore
visualize_slices.append(volume[volume.shape[0] // 2])
visualize_target.append(target[target.shape[0] // 2])
# Average loss dict
loss_dict = reduce_list_of_dicts(val_losses)
reduce_tensor_dict(loss_dict)
communication.synchronize()
torch.cuda.empty_cache()
# TODO: Does not work yet with normal gather.
all_gathered_metrics = merge_list_of_dicts(communication.all_gather(val_volume_metrics))
return loss_dict, all_gathered_metrics, visualize_slices, visualize_target
def compute_model_per_coil(self, model_name: str, data: torch.Tensor) -> torch.Tensor:
"""Performs forward pass of model `model_name` in `self.models` per coil.
Parameters
----------
model_name: str
Model to run.
data: torch.Tensor
Multi-coil data of shape (batch, coil, complex=2, height, width).
Returns
-------
output: torch.Tensor
Computed output per coil.
"""
output = []
for idx in range(data.size(self._coil_dim)):
subselected_data = data.select(self._coil_dim, idx)
output.append(self.models[model_name](subselected_data))
return torch.stack(output, dim=self._coil_dim)
def _process_output(
data: torch.Tensor,
scaling_factors: Optional[torch.Tensor] = None,
resolution: Optional[Union[List[int], Tuple[int]]] = None,
complex_axis: Optional[int] = -1,
) -> torch.Tensor:
"""Crops and scales input tensor.
Parameters
----------
data: torch.Tensor
scaling_factors: Optional[torch.Tensor]
Scaling factor. Default: None.
resolution: Optional[Union[List[int], Tuple[int]]]
Resolution. Default: None.
complex_axis: Optional[int]
Dimension along which modulus of `data` will be computed (if it's complex). Default: -1 (last).
Returns
-------
torch.Tensor
"""
# data is of shape (batch, complex=2, height, width)
if scaling_factors is not None:
data = data * scaling_factors.view(-1, *((1,) * (len(data.shape) - 1))).to(data.device)
data = T.modulus_if_complex(data, complex_axis=complex_axis)
if len(data.shape) == 3: # (batch, height, width)
data = data.unsqueeze(1) # Added channel dimension.
if resolution is not None:
data = T.center_crop(data, resolution).contiguous()
return data
def _compute_resolution(
key: Optional[str], reconstruction_size: Optional[Union[List[int], Tuple[int]]] = None
) -> Union[List[int], None]:
"""Computes resolution.
Parameters
----------
key: str
Can be `header` or None.
reconstruction_size: Optional[Union[List[int], Tuple[int]]]
Reconstruction size. Default: None.
Returns
-------
    resolution: Union[List[int], None]
Resolution of reconstruction.
"""
if key == "header":
# This will be of the form [tensor(x_0, x_1, ...), tensor(y_0, y_1,...), tensor(z_0, z_1, ...)] over
# batches.
resolution = [_.detach().cpu().numpy().tolist() for _ in reconstruction_size] # type: ignore
        # The volume sampler should give validation indices belonging to the *same* volume, so it is
        # safe to take the first element; the matrix sizes are in x,y,z (we work in z,x,y).
resolution = [_[0] for _ in resolution][:-1]
return resolution
elif not key:
return None
else:
raise ValueError("Cropping should be either set to `header` to get the values from the header or None.")
def _get_filename_from_batch(data: dict) -> pathlib.Path:
filenames = data.pop("filename")
if len(set(filenames)) != 1:
raise ValueError(
f"Expected a batch during validation to only contain filenames of one case. " f"Got {set(filenames)}."
)
# This can be fixed when there is a custom collate_fn
return pathlib.Path(filenames[0])
| [
"torch.zeros",
"torch.no_grad",
"torch.stack",
"torch.cuda.empty_cache"
] | 1.10.2 | NKI-AI/direct | 7c9d59345b68ea70bc20a5cea2c895c6059f8e46 |
0.2 | #!/usr/bin/env python3
"""
An entry into the ml4seti signal classifier competition.
This entry is simply a large densenet architecture convolutional neural
network. For more information, see "Densely Connected Convolutional Networks"
<https://arxiv.org/pdf/1608.06993.pdf>
"""
import argparse
import time
import sklearn.metrics
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim.lr_scheduler
import tabulate
from dataset import Dataset
from model import DenseNet
from util import tprint, stats
class Experiment(object):
def __init__(self, directory, epochs=1, cuda=False, save=False,
log_interval=30, load=None, split=(0.6, 0.2, 0.2), cache=False,
minibatch_size=10, pretrained=False):
self.dataset = Dataset(directory, split=split, cache=cache,
minibatch_size=minibatch_size)
self.epochs = epochs
self.cuda = cuda
self.save = save
self.log_interval = log_interval
self.model = DenseNet(pretrained)
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=0.01)
if load is not None:
state = torch.load(load)
self.model.load_state_dict(state['model'])
self.optimizer.load_state_dict(state['optim'])
if cuda:
self.model = self.model.cuda()
def train(self):
print('Training %s epochs.' % self.epochs)
loss_fun = nn.CrossEntropyLoss()
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
self.optimizer,
'min',
verbose=True,
patience=3
)
last_print = time.time()
for epoch in range(self.epochs):
tprint('Starting epoch: %s' % epoch)
self.model.train()
self.optimizer.zero_grad()
for minibatch, targets in self.dataset.train:
minibatch = Variable(torch.stack(minibatch))
targets = Variable(torch.LongTensor(targets))
if self.cuda:
minibatch = minibatch.cuda()
targets = targets.cuda()
out = self.model.forward(minibatch)
loss = loss_fun(out, targets)
loss.backward()
self.optimizer.step()
self.optimizer.zero_grad()
if time.time() - last_print > self.log_interval:
last_print = time.time()
numer, denom = self.dataset.train.progress()
tprint('Training: %s, %s/%s' % (epoch, numer, denom))
tprint('Training complete. Beginning validation.')
self.dataset.train.reload()
self.model.eval()
last_print = time.time()
for minibatch, targets in self.dataset.validate:
minibatch = Variable(torch.stack(minibatch), volatile=True)
targets = Variable(torch.LongTensor(targets), volatile=True)
if self.cuda:
minibatch = minibatch.cuda()
targets = targets.cuda()
out = self.model.forward(minibatch)
validation_loss = loss_fun(out, targets)
if time.time() - last_print > self.log_interval:
last_print = time.time()
numer, denom = self.dataset.validate.progress()
tprint('Validating: %s, %s/%s' % (epoch, numer, denom))
self.dataset.validate.reload()
scheduler.step(validation_loss.data[0])
if self.save:
torch.save({
'model': self.model.state_dict(),
'optim': self.optimizer.state_dict(),
}, 'signet.%s.pth' % int(time.time()))
def test(self):
tprint('Beginning testing.')
confusion_matrix = np.zeros((7, 7)).astype(np.int)
last_print = time.time()
for minibatch, targets in self.dataset.test:
minibatch = Variable(torch.stack(minibatch), volatile=True)
targets = Variable(torch.LongTensor(targets), volatile=True)
if self.cuda:
minibatch = minibatch.cuda()
targets = targets.cuda()
out = self.model.forward(minibatch)
_, predicted = torch.max(out.data, 1)
predicted = predicted.cpu().numpy()
targets = targets.data.cpu().numpy()
confusion_matrix += sklearn.metrics.confusion_matrix(
predicted,
targets,
labels=[0, 1, 2, 3, 4, 5, 6]
).astype(np.int)
if time.time() - last_print > self.log_interval:
last_print = time.time()
numer, denom = self.dataset.test.progress()
tprint('Testing: %s/%s' % (numer, denom))
tprint('Testing complete.')
print(confusion_matrix)
print(tabulate.tabulate(stats(confusion_matrix)))
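# Parses a "train,valid,test" command-line argument and normalizes the three fractions so they sum to 1.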
def valid_split(arg):
split = arg.split(',')
if len(split) != 3:
raise argparse.ArgumentTypeError("invalid split argument")
try:
train = float(split[0])
valid = float(split[1])
test = float(split[2])
except ValueError:
raise argparse.ArgumentTypeError("split args must be numbers")
denom = train + valid + test
return train/denom, valid/denom, test/denom
def main():
parser = argparse.ArgumentParser()
parser.add_argument('directory', help='Directory containing full dataset')
parser.add_argument(
'-e',
'--epochs',
type=int,
default=1,
help='Number of epochs to train')
parser.add_argument('-c', '--cuda', action='store_true', default=False)
parser.add_argument(
'--train',
action='store_true',
default=False,
help='flag to signal script should train the model')
parser.add_argument(
'--test',
action='store_true',
default=False,
help='flag to signal script should test the model')
parser.add_argument(
'-s',
'--save',
action='store_true',
default=False, help='Will cause model to be saved at end of training')
parser.add_argument(
'-l',
'--log-interval',
type=int,
default=30,
help='# of seconds between log line prints')
parser.add_argument(
'-m',
'--model',
default=None,
help='path to a pretrained model')
parser.add_argument(
'-p',
'--split',
default=(0.6, 0.2, 0.2),
type=valid_split,
help='train/validation/test set split')
parser.add_argument(
'--cache',
default=False,
action='store_true',
help='flag to cache processed spectrograms')
parser.add_argument(
'-b',
'--minibatch-size',
type=int,
default=10,
help='size of each minibatch')
parser.add_argument(
'--pretrained',
default=False,
action='store_true',
help='use DenseNet pretrained on ImageNet')
args = parser.parse_args()
if args.train or args.test:
experiment = Experiment(
args.directory,
epochs=args.epochs,
cuda=args.cuda,
save=args.save,
log_interval=args.log_interval,
load=args.model,
split=args.split,
cache=args.cache,
minibatch_size=args.minibatch_size,
pretrained=args.pretrained)
if args.train:
experiment.train()
if args.test:
experiment.test()
if __name__ == '__main__':
main()
| [
"torch.stack",
"torch.max",
"torch.LongTensor",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.load",
"torch.nn.CrossEntropyLoss"
] | 0.2.0 | sagelywizard/ml4seti | d56ffd46ca598ff4d44d2bc274acb7dc59389acd |
1.6 | import os
import time
import glob
import argparse
import os.path as osp
from tqdm import tqdm
from typing import Optional, List, NamedTuple
import numpy as np
import torch
from torch import Tensor
import torch.nn.functional as F
from torch.nn import ModuleList, Sequential, Linear, BatchNorm1d, ReLU, Dropout
from torch.optim.lr_scheduler import StepLR
from pytorch_lightning.metrics import Accuracy
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning import (LightningDataModule, LightningModule, Trainer,
seed_everything)
from torch_sparse import SparseTensor
from torch_geometric.nn import SAGEConv, GATConv
from torch_geometric.data import NeighborSampler
from ogb.lsc import MAG240MDataset, MAG240MEvaluator
from root import ROOT
class Batch(NamedTuple):
x: Tensor
y: Tensor
adjs_t: List[SparseTensor]
def to(self, *args, **kwargs):
return Batch(
x=self.x.to(*args, **kwargs),
y=self.y.to(*args, **kwargs),
adjs_t=[adj_t.to(*args, **kwargs) for adj_t in self.adjs_t],
)
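# Chunked helpers for reading/writing a column slice of a large (possibly memory-mapped) matrix
# without materializing the whole array at once.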
def get_col_slice(x, start_row_idx, end_row_idx, start_col_idx, end_col_idx):
outs = []
chunk = 100000
for i in tqdm(range(start_row_idx, end_row_idx, chunk)):
j = min(i + chunk, end_row_idx)
outs.append(x[i:j, start_col_idx:end_col_idx].copy())
return np.concatenate(outs, axis=0)
def save_col_slice(x_src, x_dst, start_row_idx, end_row_idx, start_col_idx,
end_col_idx):
assert x_src.shape[0] == end_row_idx - start_row_idx
assert x_src.shape[1] == end_col_idx - start_col_idx
chunk, offset = 100000, start_row_idx
for i in tqdm(range(0, end_row_idx - start_row_idx, chunk)):
j = min(i + chunk, end_row_idx - start_row_idx)
x_dst[offset + i:offset + j, start_col_idx:end_col_idx] = x_src[i:j]
class MAG240M(LightningDataModule):
def __init__(self, data_dir: str, batch_size: int, sizes: List[int],
in_memory: bool = False):
super().__init__()
self.data_dir = data_dir
self.batch_size = batch_size
self.sizes = sizes
self.in_memory = in_memory
@property
def num_features(self) -> int:
return 768
@property
def num_classes(self) -> int:
return 153
@property
def num_relations(self) -> int:
return 5
def prepare_data(self):
dataset = MAG240MDataset(self.data_dir)
path = f'{dataset.dir}/paper_to_paper_symmetric.pt'
if not osp.exists(path): # Will take approximately 5 minutes...
t = time.perf_counter()
print('Converting adjacency matrix...', end=' ', flush=True)
edge_index = dataset.edge_index('paper', 'cites', 'paper')
edge_index = torch.from_numpy(edge_index)
adj_t = SparseTensor(
row=edge_index[0], col=edge_index[1],
sparse_sizes=(dataset.num_papers, dataset.num_papers),
is_sorted=True)
torch.save(adj_t.to_symmetric(), path)
print(f'Done! [{time.perf_counter() - t:.2f}s]')
path = f'{dataset.dir}/full_adj_t.pt'
if not osp.exists(path): # Will take approximately 16 minutes...
t = time.perf_counter()
print('Merging adjacency matrices...', end=' ', flush=True)
row, col, _ = torch.load(
f'{dataset.dir}/paper_to_paper_symmetric.pt').coo()
rows, cols = [row], [col]
edge_index = dataset.edge_index('author', 'writes', 'paper')
row, col = torch.from_numpy(edge_index)
row += dataset.num_papers
rows += [row, col]
cols += [col, row]
edge_index = dataset.edge_index('author', 'institution')
row, col = torch.from_numpy(edge_index)
row += dataset.num_papers
col += dataset.num_papers + dataset.num_authors
rows += [row, col]
cols += [col, row]
edge_types = [
torch.full(x.size(), i, dtype=torch.int8)
for i, x in enumerate(rows)
]
row = torch.cat(rows, dim=0)
del rows
col = torch.cat(cols, dim=0)
del cols
N = (dataset.num_papers + dataset.num_authors +
dataset.num_institutions)
perm = (N * row).add_(col).numpy().argsort()
perm = torch.from_numpy(perm)
row = row[perm]
col = col[perm]
edge_type = torch.cat(edge_types, dim=0)[perm]
del edge_types
full_adj_t = SparseTensor(row=row, col=col, value=edge_type,
sparse_sizes=(N, N), is_sorted=True)
torch.save(full_adj_t, path)
print(f'Done! [{time.perf_counter() - t:.2f}s]')
path = f'{dataset.dir}/full_feat.npy'
done_flag_path = f'{dataset.dir}/full_feat_done.txt'
if not osp.exists(done_flag_path): # Will take ~3 hours...
t = time.perf_counter()
print('Generating full feature matrix...')
node_chunk_size = 100000
dim_chunk_size = 64
N = (dataset.num_papers + dataset.num_authors +
dataset.num_institutions)
paper_feat = dataset.paper_feat
x = np.memmap(path, dtype=np.float16, mode='w+',
shape=(N, self.num_features))
print('Copying paper features...')
for i in tqdm(range(0, dataset.num_papers, node_chunk_size)):
j = min(i + node_chunk_size, dataset.num_papers)
x[i:j] = paper_feat[i:j]
edge_index = dataset.edge_index('author', 'writes', 'paper')
row, col = torch.from_numpy(edge_index)
adj_t = SparseTensor(
row=row, col=col,
sparse_sizes=(dataset.num_authors, dataset.num_papers),
is_sorted=True)
# Processing 64-dim subfeatures at a time for memory efficiency.
print('Generating author features...')
for i in tqdm(range(0, self.num_features, dim_chunk_size)):
j = min(i + dim_chunk_size, self.num_features)
inputs = get_col_slice(paper_feat, start_row_idx=0,
end_row_idx=dataset.num_papers,
start_col_idx=i, end_col_idx=j)
inputs = torch.from_numpy(inputs)
outputs = adj_t.matmul(inputs, reduce='mean').numpy()
del inputs
save_col_slice(
x_src=outputs, x_dst=x, start_row_idx=dataset.num_papers,
end_row_idx=dataset.num_papers + dataset.num_authors,
start_col_idx=i, end_col_idx=j)
del outputs
edge_index = dataset.edge_index('author', 'institution')
row, col = torch.from_numpy(edge_index)
adj_t = SparseTensor(
row=col, col=row,
sparse_sizes=(dataset.num_institutions, dataset.num_authors),
is_sorted=False)
print('Generating institution features...')
# Processing 64-dim subfeatures at a time for memory efficiency.
for i in tqdm(range(0, self.num_features, dim_chunk_size)):
j = min(i + dim_chunk_size, self.num_features)
inputs = get_col_slice(
x, start_row_idx=dataset.num_papers,
end_row_idx=dataset.num_papers + dataset.num_authors,
start_col_idx=i, end_col_idx=j)
inputs = torch.from_numpy(inputs)
outputs = adj_t.matmul(inputs, reduce='mean').numpy()
del inputs
save_col_slice(
x_src=outputs, x_dst=x,
start_row_idx=dataset.num_papers + dataset.num_authors,
end_row_idx=N, start_col_idx=i, end_col_idx=j)
del outputs
x.flush()
del x
print(f'Done! [{time.perf_counter() - t:.2f}s]')
with open(done_flag_path, 'w') as f:
f.write('done')
def setup(self, stage: Optional[str] = None):
t = time.perf_counter()
print('Reading dataset...', end=' ', flush=True)
dataset = MAG240MDataset(self.data_dir)
self.train_idx = torch.from_numpy(dataset.get_idx_split('train'))
        self.train_idx.share_memory_()
self.val_idx = torch.from_numpy(dataset.get_idx_split('valid'))
self.val_idx.share_memory_()
self.test_idx = torch.from_numpy(dataset.get_idx_split('test'))
self.test_idx.share_memory_()
N = dataset.num_papers + dataset.num_authors + dataset.num_institutions
x = np.memmap(f'{dataset.dir}/full_feat.npy', dtype=np.float16,
mode='r', shape=(N, self.num_features))
if self.in_memory:
self.x = np.empty((N, self.num_features), dtype=np.float16)
self.x[:] = x
self.x = torch.from_numpy(self.x).share_memory_()
else:
self.x = x
self.y = torch.from_numpy(dataset.all_paper_label)
path = f'{dataset.dir}/full_adj_t.pt'
self.adj_t = torch.load(path)
print(f'Done! [{time.perf_counter() - t:.2f}s]')
def train_dataloader(self):
return NeighborSampler(self.adj_t, node_idx=self.train_idx,
sizes=self.sizes, return_e_id=False,
transform=self.convert_batch,
batch_size=self.batch_size, shuffle=True,
num_workers=4)
def val_dataloader(self):
return NeighborSampler(self.adj_t, node_idx=self.val_idx,
sizes=self.sizes, return_e_id=False,
transform=self.convert_batch,
batch_size=self.batch_size, num_workers=2)
def test_dataloader(self): # Test best validation model once again.
return NeighborSampler(self.adj_t, node_idx=self.val_idx,
sizes=self.sizes, return_e_id=False,
transform=self.convert_batch,
batch_size=self.batch_size, num_workers=2)
def hidden_test_dataloader(self):
return NeighborSampler(self.adj_t, node_idx=self.test_idx,
sizes=self.sizes, return_e_id=False,
transform=self.convert_batch,
batch_size=self.batch_size, num_workers=3)
def convert_batch(self, batch_size, n_id, adjs):
if self.in_memory:
x = self.x[n_id].to(torch.float)
else:
x = torch.from_numpy(self.x[n_id.numpy()]).to(torch.float)
y = self.y[n_id[:batch_size]].to(torch.long)
return Batch(x=x, y=y, adjs_t=[adj_t for adj_t, _, _ in adjs])
class RGNN(LightningModule):
def __init__(self, model: str, in_channels: int, out_channels: int,
hidden_channels: int, num_relations: int, num_layers: int,
heads: int = 4, dropout: float = 0.5):
super().__init__()
self.save_hyperparameters()
self.model = model.lower()
self.num_relations = num_relations
self.dropout = dropout
self.convs = ModuleList()
self.norms = ModuleList()
self.skips = ModuleList()
if self.model == 'rgat':
self.convs.append(
ModuleList([
GATConv(in_channels, hidden_channels // heads, heads,
add_self_loops=False) for _ in range(num_relations)
]))
for _ in range(num_layers - 1):
self.convs.append(
ModuleList([
GATConv(hidden_channels, hidden_channels // heads,
heads, add_self_loops=False)
for _ in range(num_relations)
]))
elif self.model == 'rgraphsage':
self.convs.append(
ModuleList([
SAGEConv(in_channels, hidden_channels, root_weight=False)
for _ in range(num_relations)
]))
for _ in range(num_layers - 1):
self.convs.append(
ModuleList([
SAGEConv(hidden_channels, hidden_channels,
root_weight=False)
for _ in range(num_relations)
]))
for _ in range(num_layers):
self.norms.append(BatchNorm1d(hidden_channels))
self.skips.append(Linear(in_channels, hidden_channels))
for _ in range(num_layers - 1):
self.skips.append(Linear(hidden_channels, hidden_channels))
self.mlp = Sequential(
Linear(hidden_channels, hidden_channels),
BatchNorm1d(hidden_channels),
ReLU(inplace=True),
Dropout(p=self.dropout),
Linear(hidden_channels, out_channels),
)
self.train_acc = Accuracy()
self.val_acc = Accuracy()
self.test_acc = Accuracy()
def forward(self, x: Tensor, adjs_t: List[SparseTensor]) -> Tensor:
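        # Relational message passing: at each hop, apply every relation-specific conv only to the
        # edges of its relation (masked from the merged adjacency) and add a linear skip
        # connection from the target nodes.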
for i, adj_t in enumerate(adjs_t):
x_target = x[:adj_t.size(0)]
out = self.skips[i](x_target)
for j in range(self.num_relations):
edge_type = adj_t.storage.value() == j
subadj_t = adj_t.masked_select_nnz(edge_type, layout='coo')
if subadj_t.nnz() > 0:
out += self.convs[i][j]((x, x_target), subadj_t)
x = self.norms[i](out)
x = F.elu(x) if self.model == 'rgat' else F.relu(x)
x = F.dropout(x, p=self.dropout, training=self.training)
return self.mlp(x)
def training_step(self, batch, batch_idx: int):
y_hat = self(batch.x, batch.adjs_t)
train_loss = F.cross_entropy(y_hat, batch.y)
self.train_acc(y_hat.softmax(dim=-1), batch.y)
self.log('train_acc', self.train_acc, prog_bar=True, on_step=False,
on_epoch=True)
return train_loss
def validation_step(self, batch, batch_idx: int):
y_hat = self(batch.x, batch.adjs_t)
self.val_acc(y_hat.softmax(dim=-1), batch.y)
self.log('val_acc', self.val_acc, on_step=False, on_epoch=True,
prog_bar=True, sync_dist=True)
def test_step(self, batch, batch_idx: int):
y_hat = self(batch.x, batch.adjs_t)
self.test_acc(y_hat.softmax(dim=-1), batch.y)
self.log('test_acc', self.test_acc, on_step=False, on_epoch=True,
prog_bar=True, sync_dist=True)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=0.001)
scheduler = StepLR(optimizer, step_size=25, gamma=0.25)
return [optimizer], [scheduler]
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--hidden_channels', type=int, default=1024)
parser.add_argument('--batch_size', type=int, default=1024)
parser.add_argument('--dropout', type=float, default=0.5)
parser.add_argument('--epochs', type=int, default=100)
parser.add_argument('--model', type=str, default='rgat',
choices=['rgat', 'rgraphsage'])
parser.add_argument('--sizes', type=str, default='25-15')
parser.add_argument('--in-memory', action='store_true')
parser.add_argument('--device', type=str, default='0')
parser.add_argument('--evaluate', action='store_true')
args = parser.parse_args()
args.sizes = [int(i) for i in args.sizes.split('-')]
print(args)
seed_everything(42)
datamodule = MAG240M(ROOT, args.batch_size, args.sizes, args.in_memory)
if not args.evaluate:
model = RGNN(args.model, datamodule.num_features,
datamodule.num_classes, args.hidden_channels,
datamodule.num_relations, num_layers=len(args.sizes),
dropout=args.dropout)
print(f'#Params {sum([p.numel() for p in model.parameters()])}')
checkpoint_callback = ModelCheckpoint(monitor='val_acc', save_top_k=1)
trainer = Trainer(gpus=args.device, max_epochs=args.epochs,
callbacks=[checkpoint_callback],
default_root_dir=f'logs/{args.model}')
trainer.fit(model, datamodule=datamodule)
if args.evaluate:
dirs = glob.glob(f'logs/{args.model}/lightning_logs/*')
version = max([int(x.split(os.sep)[-1].split('_')[-1]) for x in dirs])
logdir = f'logs/{args.model}/lightning_logs/version_{version}'
print(f'Evaluating saved model in {logdir}...')
ckpt = glob.glob(f'{logdir}/checkpoints/*')[0]
trainer = Trainer(gpus=args.device, resume_from_checkpoint=ckpt)
model = RGNN.load_from_checkpoint(
checkpoint_path=ckpt, hparams_file=f'{logdir}/hparams.yaml')
datamodule.batch_size = 16
datamodule.sizes = [160] * len(args.sizes) # (Almost) no sampling...
trainer.test(model=model, datamodule=datamodule)
evaluator = MAG240MEvaluator()
loader = datamodule.hidden_test_dataloader()
model.eval()
y_preds = []
for batch in tqdm(loader):
batch = batch.to(int(args.device))
with torch.no_grad():
out = model(batch.x, batch.adjs_t).argmax(dim=-1).cpu()
y_preds.append(out)
res = {'y_pred': torch.cat(y_preds, dim=0)}
evaluator.save_test_submission(res, f'results/{args.model}')
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.Dropout",
"torch.optim.lr_scheduler.StepLR",
"torch.nn.ModuleList",
"torch.save",
"torch.nn.functional.dropout",
"torch.nn.functional.elu",
"torch.no_grad",
"torch.from_numpy",
"torch.nn.ReLU",
"torch.nn.functional.cross_entropy",
"torch.nn.BatchNorm1d",
"torch.load",
"torch.nn.functional.relu"
] | 1.6.0 | tsotfsk/ogb | 3c24dd5f67a13426d5f39c7a71a460499713631a |
1.2 | from typing import Callable
import pytest
import torch
from rainy.net import GruBlock, LstmBlock, RnnBlock
from rainy.utils import Device
@pytest.mark.parametrize("rnn_gen", [GruBlock, LstmBlock])
def test_rnn(rnn_gen: Callable[[int, int], RnnBlock]) -> None:
TIME_STEP = 10
BATCH_SIZE = 5
INPUT_DIM = 20
OUTPUT_DIM = 3
rnn = rnn_gen(INPUT_DIM, OUTPUT_DIM)
device = Device()
rnn.to(device.unwrapped)
hidden = rnn.initial_state(BATCH_SIZE, device)
cached_inputs = []
for i in range(TIME_STEP):
inputs = torch.randn(BATCH_SIZE, INPUT_DIM, device=device.unwrapped)
cached_inputs.append(inputs.detach())
out, hidden = rnn(inputs, hidden)
assert tuple(out.shape) == (BATCH_SIZE, OUTPUT_DIM)
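    # Replay all cached timesteps as one flattened batch from a fresh initial state and check the output shape.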
batch_inputs = torch.cat(cached_inputs)
hidden = rnn.initial_state(BATCH_SIZE, device)
out, _ = rnn(batch_inputs, hidden)
assert tuple(out.shape) == (TIME_STEP * BATCH_SIZE, OUTPUT_DIM)
| [
"torch.cat",
"torch.randn"
] | 1.2 | kngwyu/Rainy | 535f8f49d9efe0324a8480d0165ab1b35d83dc19 |
1.2 | """
This module has an implementation of ACKTR, which is described in
- Scalable trust-region method for deep reinforcement learning using
Kronecker-factored approximation
- URL: https://arxiv.org/abs/1708.05144
"""
import torch
from ..config import Config
from ..net import Policy
from .a2c import A2CAgent
class ACKTRAgent(A2CAgent):
def __init__(self, config: Config) -> None:
super().__init__(config)
if self.net.is_recurrent:
raise NotImplementedError("K-FAC for RNN is not implemented!")
self.precond = config.preconditioner(self.net)
def _pre_backward(self, policy: Policy, value: torch.Tensor) -> None:
"""Calculate emprical fisher loss"""
self.net.zero_grad()
policy_fisher_loss = -policy.log_prob().mean()
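        # The critic is treated as a unit-variance Gaussian: a target is sampled around the current
        # value estimate and the squared error supplies the value head's Fisher term.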
sample_value = torch.randn_like(value) + value.detach()
value_fisher_loss = -(value - sample_value).pow(2).mean()
fisher_loss = policy_fisher_loss + value_fisher_loss
with self.precond.save_grad():
fisher_loss.backward(retain_graph=True)
def _step_optimizer(self) -> None:
"""Approximates F^-1∇h and apply it."""
self.precond.step()
self.optimizer.step()
self.lr_cooler.lr_decay(self.optimizer)
| [
"torch.randn_like"
] | 1.2 | kngwyu/Rainy | 535f8f49d9efe0324a8480d0165ab1b35d83dc19 |
1.10 | import os
import torch
from torch import nn
from torch.nn.functional import interpolate, softmax
from torch.cuda.amp import autocast
from .base import BaseNet
norm_layer = nn.BatchNorm2d
class DFF(BaseNet):
"""
Reference:
        - Hu, Yuan, et al. "Dynamic Feature Fusion for Semantic Edge Detection", AAAI 2019
"""
def __init__(self, backbone, amp=False):
super(DFF, self).__init__(backbone, amp)
self.side0 = nn.Sequential(nn.Conv2d(in_channels=128, out_channels=1, kernel_size=1),
norm_layer(1))
self.side0_residual = SideResidual(in_channels=128, inter_channels=128 // 8, upsample_rate=1)
self.side1 = nn.Sequential(nn.Conv2d(in_channels=256, out_channels=1, kernel_size=1),
norm_layer(1),
nn.ConvTranspose2d(1, 1, 4, stride=2, padding=1, bias=False))
self.side1_residual = SideResidual(in_channels=256, inter_channels=256 // 8, upsample_rate=2)
self.side2 = nn.Sequential(nn.Conv2d(in_channels=512, out_channels=1, kernel_size=1),
norm_layer(1),
nn.ConvTranspose2d(1, 1, 4, stride=2, padding=1, bias=False))
self.side2_residual = SideResidual(in_channels=512, inter_channels=512 // 8, upsample_rate=2)
self.side3 = nn.Sequential(nn.Conv2d(in_channels=1024, out_channels=1, kernel_size=1),
norm_layer(1),
nn.ConvTranspose2d(1, 1, 8, stride=4, padding=2, bias=False))
self.side3_residual = SideResidual(in_channels=1024, inter_channels=1024 // 8, upsample_rate=4)
self.side4 = nn.Sequential(nn.Conv2d(in_channels=2048, out_channels=1, kernel_size=1),
norm_layer(1),
nn.ConvTranspose2d(1, 1, 16, stride=8, padding=4, bias=False))
self.side4_residual = SideResidual(in_channels=2048, inter_channels=2048 // 8, upsample_rate=8)
self.side4_weight = nn.Sequential(nn.Conv2d(in_channels=2048, out_channels=5, kernel_size=1),
norm_layer(5),
nn.ConvTranspose2d(5, 5, 16, stride=8, padding=4, bias=False))
self.bn = norm_layer(5)
self.sigmoid = nn.Sigmoid()
self.ada_learner = LocationAdaptiveLearner(1, 5, 5, norm_layer=norm_layer)
def forward(self, x):
with autocast(enabled=self.amp):
x, c1, c2, c3, c4 = self.backbone.base_forward(x)
side0 = self.side0(x) + self.side0_residual(x)
side1 = self.side1(c1) + self.side1_residual(c1)
side2 = self.side2(c2) + self.side2_residual(c2)
side3 = self.side3(c3) + self.side3_residual(c3)
side4 = self.side4(c4) + self.side4_residual(c4)
side4_weight = self.side4_weight(c4)
ada_weights = self.ada_learner(side4_weight)
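            # Fuse: stack the five side outputs, weight them per location with the learned
            # adaptive weights, and sum over the side-output dimension.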
fused = torch.cat((side0, side1, side2, side3, side4), dim=1)
fused = self.bn(fused)
fused = fused.view(fused.size(0), 1, -1, fused.size(2), fused.size(3))
fused = torch.mul(fused, ada_weights)
fused = torch.sum(fused, 2)
out = self.sigmoid(fused)
return fused, out
class LocationAdaptiveLearner(nn.Module):
"""docstring for LocationAdaptiveLearner"""
def __init__(self, nclass, in_channels, out_channels, norm_layer=nn.BatchNorm2d):
super(LocationAdaptiveLearner, self).__init__()
self.nclass = nclass
self.conv1 = nn.Sequential(nn.Conv2d(in_channels, out_channels, 1, bias=True),
norm_layer(out_channels),
nn.ReLU(inplace=True))
self.conv2 = nn.Sequential(nn.Conv2d(out_channels, out_channels, 1, bias=True),
norm_layer(out_channels),
nn.ReLU(inplace=True))
self.conv3 = nn.Sequential(nn.Conv2d(out_channels, out_channels, 1, bias=True),
norm_layer(out_channels))
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = x.view(x.size(0), self.nclass, -1, x.size(2), x.size(3))
return x
class SideResidual(nn.Module):
def __init__(self, in_channels, inter_channels, upsample_rate):
super(SideResidual, self).__init__()
self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=inter_channels, kernel_size=1)
self.bn1 = norm_layer(inter_channels)
self.relu = nn.ReLU()
self.conv2 = nn.Conv2d(in_channels=inter_channels, out_channels=inter_channels, kernel_size=3, padding=1)
self.bn2 = norm_layer(inter_channels)
self.conv3 = nn.Conv2d(in_channels=inter_channels, out_channels=1, kernel_size=1)
self.bn3 = norm_layer(1)
self.upsample_rate = upsample_rate
if upsample_rate == 2:
self.upsample = nn.ConvTranspose2d(1, 1, 4, stride=2, padding=1, bias=False)
elif upsample_rate == 4:
self.upsample = nn.ConvTranspose2d(1, 1, 8, stride=4, padding=2, bias=False)
elif upsample_rate == 8:
self.upsample = nn.ConvTranspose2d(1, 1, 16, stride=8, padding=4, bias=False)
def forward(self, x):
out = self.relu(self.bn1(self.conv1(x)))
out = self.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
if self.upsample_rate != 1:
out = self.upsample(out)
return out
| [
"torch.cat",
"torch.mul",
"torch.cuda.amp.autocast",
"torch.nn.Sigmoid",
"torch.nn.ConvTranspose2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.sum"
] | 1.10.0 | Theia-4869/U-RISC | c493b11ab525b39a5ac029c3f83e059d703abaae |
1.0 | import torch
from abstract_rl.src.data_structures.abstract_conf.model_configuration import ModelConfiguration
from abstract_rl.src.operator.trajectory_operator import TrajectoryOperator
class ResampleOperator(TrajectoryOperator):
"""
    Samples additional actions for each state and attaches their Q-values and log-likelihoods to the trajectory.
"""
def __repr__(self):
return "resample"
def __init__(self, mc):
"""
        Initializes a new resample operator.
:param mc: The model configuration with env and so on.
"""
assert isinstance(mc, ModelConfiguration)
conf = mc.get('conf')
self.conf = conf
self.add_act = conf['add_acts']
self.mc = mc
self.policy = mc.get('policy', True)
self.q_net = mc.get('q_network', True)
env = mc['env']
self.discount = env.discount()
def transform(self, trajectory):
"""
Transform a trajectory with the current instance of the operator.
"""
        # sample additional actions for each state (sample-based augmentation)
t_states = torch.Tensor(trajectory.states)
f_stats = self.policy.forward(t_states)
sampled_actions = self.policy.sample_actions(suff_stats=f_stats, num_actions=self.add_act)
ll = self.policy.log_prob(sampled_actions, suff_stats=f_stats)
q_vals = self.q_net.q_val(t_states.repeat([self.add_act, 1]), sampled_actions.view([-1, 1]))
q_vals = q_vals.view(sampled_actions.size())
act_rew = sampled_actions.detach().numpy()
q_vals = q_vals.detach().numpy()
ll = ll.detach().numpy()
trajectory['add_act'] = act_rew
trajectory['add_q'] = q_vals
trajectory['add_pi'] = ll
| [
"torch.Tensor"
] | 1.0.1 | kosmitive/abstract_rl | 13038a1a5a93c78374ba869c9e75221c2b73d290 |
1.3 | import os
import cv2
import glob
import numpy as np
from PIL import Image
import torch
import torchvision
from torch import nn
from torch.utils import data
from torchvision import transforms
import torch.nn.functional as F
def get_char_dict():
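    # Character vocabulary: special tokens (pad/sos/eos) followed by the printable ASCII range 32-126.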
char_dict = {}
char_dict["pad"] = 0
char_dict["sos"] = 1
char_dict["eos"] = 2
for i in range(32, 127):
char_dict[chr(i)] = len(char_dict)
inverse_char_dict = {v: k for k, v in char_dict.items()}
return char_dict, inverse_char_dict
def resize_image(image, desired_size):
''' Helper function to resize an image while keeping the aspect ratio.
    Parameters
    ----------
image: np.array
The image to be resized.
desired_size: (int, int)
The (height, width) of the resized image
    Returns
    -------
image: np.array
The image of size = desired_size
bounding box: (int, int, int, int)
(x, y, w, h) in percentages of the resized image of the original
'''
size = image.shape[:2]
if size[0] > desired_size[0] or size[1] > desired_size[1]:
ratio_w = float(desired_size[0]) / size[0]
ratio_h = float(desired_size[1]) / size[1]
ratio = min(ratio_w, ratio_h)
new_size = tuple([int(x * ratio) for x in size])
image = cv2.resize(image, (new_size[1], new_size[0]))
size = image.shape
delta_w = max(0, desired_size[1] - size[1])
delta_h = max(0, desired_size[0] - size[0])
top, bottom = delta_h // 2, delta_h - (delta_h // 2)
left, right = delta_w // 2, delta_w - (delta_w // 2)
color = image[0][0]
if color < 230:
color = 230
image = cv2.copyMakeBorder(image, top, bottom, left, right, cv2.BORDER_CONSTANT, value=float(color))
crop_bb = (left / image.shape[1], top / image.shape[0], (image.shape[1] - right - left) / image.shape[1],
(image.shape[0] - bottom - top) / image.shape[0])
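    # Push near-white pixels (value > 230) to pure white.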
image[image > 230] = 255
return image, crop_bb
def get_transform(phase="train"):
transfrom_PIL_list = [
transforms.RandomAffine((-2, 2), fillcolor=255),
transforms.ColorJitter(brightness=0.5),
transforms.ColorJitter(contrast=0.5),
transforms.ColorJitter(saturation=0.5),
]
transfrom_tensor_list = [
transforms.RandomErasing(p=0.5, scale=(0.02, 0.1), value=0),
]
if phase == "train":
transform = transforms.Compose([
transforms.RandomApply(transfrom_PIL_list),
transforms.ToTensor(),
# transforms.RandomApply(transfrom_tensor_list),
transforms.Normalize(
mean=[0.5],
std=[0.5]),
])
else:
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(
mean=[0.5],
std=[0.5]),
])
return transform
def read_img(img, inference_transform,desired_size=(128, 1024)):
img_resize, crop_cc = resize_image(img, desired_size)
img_resize = Image.fromarray(img_resize)
img_tensor = inference_transform(img_resize)
return img_tensor
class IAM_Dataset_line(data.Dataset):
"""
pytorch dataset for IAM handwritten dataset
"""
def __init__(self, dataset_path, tokenizer, phase="train", padding=128):
"""
        Initialize the dataset with its path and tokenizer. Use phase="train" for training and
        "valid" / "test" for validation and testing; padding is the maximum length of the
        encoded label sequence.
"""
self.phase = phase
self.dataset_path = dataset_path
self.padding = padding
self.tokenizer = tokenizer
self.line_imgs = self.read_train_valid_test_files()
self.label_dict = self.read_label_dict()
self.transform = get_transform(self.phase)
def __len__(self):
return len(self.line_imgs)
def read_label_dict(self):
"""
        Read the line-level ground truth from ascii/lines.txt into a dict
        mapping line id (file name) to its transcription.
"""
line_txt_path = os.path.join(self.dataset_path, "ascii/lines.txt")
with open(line_txt_path, "r") as f:
lines = f.readlines()
ground_truth_dict = {}
for line in lines:
if line.startswith("#"):
continue
line = line.strip()
line_blocks = line.split(" ")
key = line_blocks[0]
texts = line_blocks[-1].replace("|", " ")
ground_truth_dict[key] = texts
return ground_truth_dict
def read_train_valid_test_files(self):
"""
        Split all line images into train, valid and test sets.
        The split is done at the file level, so line images from the same file
        are never divided across different sets.
"""
np.random.seed(55)
folder_path = os.path.join(self.dataset_path, "lines")
folders = glob.glob(os.path.join(folder_path, "*"))
files = []
for folder in folders:
files_in_folder = glob.glob(os.path.join(folder, "*"))
files.extend(files_in_folder)
train_file_num = int(len(files) * 0.9)
valid_file_num = int(len(files) * 0.05)
files_permute = np.random.permutation(files)
train_files = files_permute[:train_file_num]
valid_files = files_permute[train_file_num:train_file_num + valid_file_num]
test_files = files_permute[train_file_num + valid_file_num:]
train_lines = []
valid_lines = []
test_lines = []
files_tuple = [(train_lines, train_files), (valid_lines, valid_files), (test_lines, test_files)]
for phase_lines, phase_files in files_tuple:
for file_folder in phase_files:
file_imgs = glob.glob(os.path.join(file_folder, "*.png"))
for img_path in file_imgs:
phase_lines.append((img_path, os.path.basename(img_path).split(".")[0]))
print("Total files: ", len(files))
print("Train files: ", len(train_files))
print("Valid files: ", len(valid_files))
print("Test files: ", len(test_files))
if self.phase == "train":
return train_lines
elif self.phase == "valid":
return valid_lines
else:
return test_lines
def read_img(self, img_path, desired_size=(128, 1024)):
img = cv2.imread(img_path, 0)
img_resize, crop_cc = resize_image(img, desired_size)
img_resize = Image.fromarray(img_resize)
img_tensor = self.transform(img_resize)
return img_tensor
def read_label(self, label_key):
text = self.label_dict[label_key]
line_label = self.tokenizer.encode(text)
input_lengths = len(line_label)
if self.padding > 0:
padded_line = np.ones(self.padding)*-1
max_len = min(self.padding, input_lengths)
padded_line[:max_len] = line_label[:max_len]
line_label = padded_line
input_lengths = max_len
label_tensor = torch.from_numpy(line_label)
input_lengths = torch.tensor(input_lengths)
return label_tensor, input_lengths
def __getitem__(self, index):
image_path, label_key = self.line_imgs[index]
X = self.read_img(image_path)
y, lengths = self.read_label(label_key)
return X.float(), y.long(), lengths.long()
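# Illustrative usage sketch: wiring the dataset into a DataLoader. The dataset path is
# a placeholder and `tokenizer` is assumed to be any object exposing an
# `encode(text) -> List[int]` method; both are hypothetical here.
def _build_loader_example(dataset_path="/path/to/IAM", tokenizer=None):
    train_set = IAM_Dataset_line(dataset_path, tokenizer, phase="train", padding=128)
    loader = data.DataLoader(train_set, batch_size=8, shuffle=True, num_workers=2)
    images, labels, lengths = next(iter(loader))  # images: (8, 1, 128, 1024)
    return images.shape, labels.shape, lengths.shape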
| [
"torch.from_numpy",
"torch.tensor"
] | 1.3.1 | guanjianyu/pytorch_hand_written_recognition | b9804ee1213fa744430ee7c25a52b04ab295747a |
1.6 | # Copyright (c) Facebook, Inc. and its affiliates.
import math
import os
from copy import deepcopy
from typing import Dict, List, Optional, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from mmf.common.registry import registry
from mmf.models import BaseModel
from mmf.modules.hf_layers import replace_with_jit
from mmf.utils.configuration import get_mmf_cache_dir
from mmf.utils.modeling import get_optimizer_parameters_for_bert
from omegaconf import OmegaConf
from torch import Tensor, nn
from torch.nn import CrossEntropyLoss
from transformers.modeling_bert import (
ACT2FN,
BertConfig,
BertEmbeddings,
BertIntermediate,
BertLMPredictionHead,
BertOutput,
BertPredictionHeadTransform,
BertPreTrainedModel,
BertSelfOutput,
)
from mmf.utils.checkpoint import load_pretrained_model
from mmf.models.interfaces.feature_models import FeatureModelInterface
class BertSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads)
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.visualization = config.visualization
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (
self.num_attention_heads,
self.attention_head_size,
)
x = x.view(new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self, hidden_states: Tensor, attention_mask: Tensor
) -> Tuple[Tensor, Dict[str, Tensor]]:
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw
# attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in
        # BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
if self.visualization:
attn_data = {
"attn": attention_probs,
"queries": query_layer,
"keys": key_layer,
}
else:
attn_data = {}
return context_layer, attn_data
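# Illustrative sketch: transpose_for_scores reshapes (batch, seq_len, hidden) into
# (batch, num_heads, seq_len, head_size) so each head attends over the sequence
# independently. `_head_split_sketch` is a hypothetical helper that only checks shapes.
def _head_split_sketch(batch=2, seq_len=5, num_heads=12, head_size=64):
    hidden = torch.zeros(batch, seq_len, num_heads * head_size)
    split = hidden.view(batch, seq_len, num_heads, head_size).permute(0, 2, 1, 3)
    scores = torch.matmul(split, split.transpose(-1, -2)) / math.sqrt(head_size)
    assert scores.shape == (batch, num_heads, seq_len, seq_len)
    return scores.shape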
class BertAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(
self, input_tensor: Tensor, attention_mask: Tensor
) -> Tuple[Tensor, Dict[str, Tensor]]:
self_output, attention_probs = self.self(input_tensor, attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output, attention_probs
class BertLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.attention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(
self, hidden_states: Tensor, attention_mask: Tensor
) -> Tuple[Tensor, Dict[str, Tensor]]:
attention_output, attention_probs = self.attention(
hidden_states, attention_mask
)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output, attention_probs
@torch.no_grad()
def forward_no_grad(self, hidden_states, attention_mask):
return self.forward(hidden_states, attention_mask)
class BertImageSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.v_hidden_size % config.v_num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.v_hidden_size, config.v_num_attention_heads)
)
self.dynamic_attention = config.dynamic_attention
self.num_attention_heads = config.v_num_attention_heads
self.attention_head_size = int(
config.v_hidden_size / config.v_num_attention_heads
)
self.visualization = config.visualization
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.v_hidden_size, self.all_head_size)
self.key = nn.Linear(config.v_hidden_size, self.all_head_size)
self.value = nn.Linear(config.v_hidden_size, self.all_head_size)
if self.dynamic_attention:
self.dyLinear_q = nn.Linear(config.hidden_size, self.all_head_size)
self.dyLinear_k = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.v_attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (
self.num_attention_heads,
self.attention_head_size,
)
x = x.view(new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states: Tensor,
attention_mask: Tensor,
txt_embedding: Tensor,
txt_attention_mask: Tensor,
) -> Tuple[Tensor, Dict[str, Tensor]]:
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
if (
self.dynamic_attention
and hasattr(self, "dyLinear_q")
and hasattr(self, "dyLinear_k")
):
pool_embedding = (txt_embedding * txt_attention_mask).sum(1)
pool_embedding = pool_embedding / txt_attention_mask.sum(1)
# given pool embedding, Linear and Sigmoid layer.
gate_q = 1 + torch.sigmoid(self.dyLinear_q(pool_embedding))
gate_k = 1 + torch.sigmoid(self.dyLinear_k(pool_embedding))
mixed_query_layer = mixed_query_layer * gate_q.unsqueeze(1)
mixed_key_layer = mixed_key_layer * gate_k.unsqueeze(1)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the
# raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in BertModel
        # forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
if self.visualization:
attn_data = {
"attn": attention_probs,
"queries": query_layer,
"keys": key_layer,
}
else:
attn_data = {}
return context_layer, attn_data
class BertImageSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.v_hidden_size, config.v_hidden_size)
self.LayerNorm = nn.LayerNorm(config.v_hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.v_hidden_dropout_prob)
def forward(self, hidden_states: Tensor, input_tensor: Tensor) -> Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertImageAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = BertImageSelfAttention(config)
self.output = BertImageSelfOutput(config)
def forward(
self,
input_tensor: Tensor,
attention_mask: Tensor,
txt_embedding: Tensor,
txt_attention_mask: Tensor,
) -> Tuple[Tensor, Dict[str, Tensor]]:
self_output, attention_probs = self.self(
input_tensor, attention_mask, txt_embedding, txt_attention_mask
)
attention_output = self.output(self_output, input_tensor)
return attention_output, attention_probs
class BertImageIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.v_hidden_size, config.v_intermediate_size)
if isinstance(config.v_hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.v_hidden_act]
else:
self.intermediate_act_fn = config.v_hidden_act
def forward(self, hidden_states: Tensor) -> Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertImageOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.v_intermediate_size, config.v_hidden_size)
self.LayerNorm = nn.LayerNorm(config.v_hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.v_hidden_dropout_prob)
def forward(self, hidden_states: Tensor, input_tensor: Tensor) -> Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertImageLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.attention = BertImageAttention(config)
self.intermediate = BertImageIntermediate(config)
self.output = BertImageOutput(config)
def forward(
self,
hidden_states: Tensor,
attention_mask: Tensor,
txt_embedding: Tensor,
txt_attention_mask: Tensor,
) -> Tuple[Tensor, Dict[str, Tensor]]:
attention_output, attention_probs = self.attention(
hidden_states, attention_mask, txt_embedding, txt_attention_mask
)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output, attention_probs
@torch.no_grad()
def forward_no_grad(
self,
hidden_states: Tensor,
attention_mask: Tensor,
txt_embedding: Tensor,
txt_attention_mask: Tensor,
) -> Tuple[Tensor, Dict[str, Tensor]]:
return self.forward(
hidden_states, attention_mask, txt_embedding, txt_attention_mask
)
class BertBiAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.bi_hidden_size % config.bi_num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.bi_hidden_size, config.bi_num_attention_heads)
)
self.visualization = config.visualization
self.num_attention_heads = config.bi_num_attention_heads
self.attention_head_size = int(
config.bi_hidden_size / config.bi_num_attention_heads
)
self.all_head_size = self.num_attention_heads * self.attention_head_size
# self.scale = nn.Linear(1, self.num_attention_heads, bias=False)
# self.scale_act_fn = ACT2FN['relu']
self.query1 = nn.Linear(config.v_hidden_size, self.all_head_size)
self.key1 = nn.Linear(config.v_hidden_size, self.all_head_size)
self.value1 = nn.Linear(config.v_hidden_size, self.all_head_size)
# self.logit1 = nn.Linear(config.hidden_size, self.num_attention_heads)
self.dropout1 = nn.Dropout(config.v_attention_probs_dropout_prob)
self.query2 = nn.Linear(config.hidden_size, self.all_head_size)
self.key2 = nn.Linear(config.hidden_size, self.all_head_size)
self.value2 = nn.Linear(config.hidden_size, self.all_head_size)
# self.logit2 = nn.Linear(config.hidden_size, self.num_attention_heads)
self.dropout2 = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (
self.num_attention_heads,
self.attention_head_size,
)
x = x.view(new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
input_tensor1: Tensor,
attention_mask1: Tensor,
input_tensor2: Tensor,
attention_mask2: Tensor,
co_attention_mask: Optional[Tensor] = None,
use_co_attention_mask: bool = False,
) -> Tuple[Tensor, Tensor, Dict[str, Tensor]]:
# for vision input.
mixed_query_layer1 = self.query1(input_tensor1)
mixed_key_layer1 = self.key1(input_tensor1)
mixed_value_layer1 = self.value1(input_tensor1)
# mixed_logit_layer1 = self.logit1(input_tensor1)
query_layer1 = self.transpose_for_scores(mixed_query_layer1)
key_layer1 = self.transpose_for_scores(mixed_key_layer1)
value_layer1 = self.transpose_for_scores(mixed_value_layer1)
# logit_layer1 = self.transpose_for_logits(mixed_logit_layer1)
# for text input:
mixed_query_layer2 = self.query2(input_tensor2)
mixed_key_layer2 = self.key2(input_tensor2)
mixed_value_layer2 = self.value2(input_tensor2)
# mixed_logit_layer2 = self.logit2(input_tensor2)
query_layer2 = self.transpose_for_scores(mixed_query_layer2)
key_layer2 = self.transpose_for_scores(mixed_key_layer2)
value_layer2 = self.transpose_for_scores(mixed_value_layer2)
# logit_layer2 = self.transpose_for_logits(mixed_logit_layer2)
# Take the dot product between "query2" and "key1" to get the raw
# attention scores for value 1.
attention_scores1 = torch.matmul(query_layer2, key_layer1.transpose(-1, -2))
attention_scores1 = attention_scores1 / math.sqrt(self.attention_head_size)
attention_scores1 = attention_scores1 + attention_mask1
# if use_co_attention_mask:
# attention_scores1 = attention_scores1 + co_attention_mask.permute(0,1,3,2)
# Normalize the attention scores to probabilities.
attention_probs1 = nn.functional.softmax(attention_scores1, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs1 = self.dropout1(attention_probs1)
context_layer1 = torch.matmul(attention_probs1, value_layer1)
context_layer1 = context_layer1.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape1 = context_layer1.size()[:-2] + (self.all_head_size,)
context_layer1 = context_layer1.view(new_context_layer_shape1)
# Take the dot product between "query1" and "key2" to get the
# raw attention scores for value 2.
attention_scores2 = torch.matmul(query_layer1, key_layer2.transpose(-1, -2))
attention_scores2 = attention_scores2 / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in BertModel
        # forward() function)
# we can comment this line for single flow.
attention_scores2 = attention_scores2 + attention_mask2
# if use_co_attention_mask:
# attention_scores2 = attention_scores2 + co_attention_mask
# Normalize the attention scores to probabilities.
attention_probs2 = nn.functional.softmax(attention_scores2, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs2 = self.dropout2(attention_probs2)
context_layer2 = torch.matmul(attention_probs2, value_layer2)
context_layer2 = context_layer2.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape2 = context_layer2.size()[:-2] + (self.all_head_size,)
context_layer2 = context_layer2.view(new_context_layer_shape2)
attn_data = {}
if self.visualization:
attn_data = {
"attn1": attention_probs1,
"queries1": query_layer2,
"keys1": key_layer1,
"attn2": attention_probs2,
"querues2": query_layer1,
"keys2": key_layer2,
}
return context_layer1, context_layer2, attn_data
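# Note: in BertBiAttention the text queries (query2) attend over the image keys/values
# (key1/value1) to produce context_layer1, while the image queries (query1) attend over
# the text keys/values to produce context_layer2, so each modality is contextualized by
# the other stream.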
class BertBiOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense1 = nn.Linear(config.bi_hidden_size, config.v_hidden_size)
self.LayerNorm1 = nn.LayerNorm(config.v_hidden_size, eps=1e-12)
self.dropout1 = nn.Dropout(config.v_hidden_dropout_prob)
self.q_dense1 = nn.Linear(config.bi_hidden_size, config.v_hidden_size)
self.q_dropout1 = nn.Dropout(config.v_hidden_dropout_prob)
self.dense2 = nn.Linear(config.bi_hidden_size, config.hidden_size)
self.LayerNorm2 = nn.LayerNorm(config.hidden_size, eps=1e-12)
self.dropout2 = nn.Dropout(config.hidden_dropout_prob)
self.q_dense2 = nn.Linear(config.bi_hidden_size, config.hidden_size)
self.q_dropout2 = nn.Dropout(config.hidden_dropout_prob)
def forward(
self,
hidden_states1: Tensor,
input_tensor1: Tensor,
hidden_states2: Tensor,
input_tensor2: Tensor,
) -> Tuple[Tensor, Tensor]:
context_state1 = self.dense1(hidden_states1)
context_state1 = self.dropout1(context_state1)
context_state2 = self.dense2(hidden_states2)
context_state2 = self.dropout2(context_state2)
hidden_states1 = self.LayerNorm1(context_state1 + input_tensor1)
hidden_states2 = self.LayerNorm2(context_state2 + input_tensor2)
return hidden_states1, hidden_states2
class BertConnectionLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.biattention = BertBiAttention(config)
self.biOutput = BertBiOutput(config)
self.v_intermediate = BertImageIntermediate(config)
self.v_output = BertImageOutput(config)
self.t_intermediate = BertIntermediate(config)
self.t_output = BertOutput(config)
def forward(
self,
input_tensor1: Tensor,
attention_mask1: Tensor,
input_tensor2: Tensor,
attention_mask2: Tensor,
co_attention_mask: Optional[Tensor] = None,
use_co_attention_mask: bool = False,
) -> Tuple[Tensor, Tensor, Dict[str, Tensor]]:
bi_output1, bi_output2, co_attention_probs = self.biattention(
input_tensor1,
attention_mask1,
input_tensor2,
attention_mask2,
co_attention_mask,
use_co_attention_mask,
)
attention_output1, attention_output2 = self.biOutput(
bi_output2, input_tensor1, bi_output1, input_tensor2
)
intermediate_output1 = self.v_intermediate(attention_output1)
layer_output1 = self.v_output(intermediate_output1, attention_output1)
intermediate_output2 = self.t_intermediate(attention_output2)
layer_output2 = self.t_output(intermediate_output2, attention_output2)
return layer_output1, layer_output2, co_attention_probs
class BertEncoder(nn.Module):
def __init__(self, config):
super().__init__()
# in the bert encoder, we need to extract three things here.
# text bert layer: BertLayer
# vision bert layer: BertImageLayer
# Bi-Attention: Given the output of two bertlayer, perform bi-directional
# attention and add on two layers.
self.FAST_MODE = config.fast_mode
self.with_coattention = config.with_coattention
self.v_biattention_id = config.v_biattention_id
self.t_biattention_id = config.t_biattention_id
self.in_batch_pairs = config.in_batch_pairs
self.fixed_t_layer = config.fixed_t_layer
self.fixed_v_layer = config.fixed_v_layer
layer = BertLayer(config)
v_layer = BertImageLayer(config)
connect_layer = BertConnectionLayer(config)
self.layer = nn.ModuleList(
[deepcopy(layer) for _ in range(config.num_hidden_layers)]
)
self.v_layer = nn.ModuleList(
[deepcopy(v_layer) for _ in range(config.v_num_hidden_layers)]
)
self.c_layer = nn.ModuleList(
[deepcopy(connect_layer) for _ in range(len(config.v_biattention_id))]
)
def forward(
self,
txt_embedding: Tensor,
image_embedding: Tensor,
txt_attention_mask: Tensor,
txt_attention_mask2: Tensor,
image_attention_mask: Tensor,
co_attention_mask: Tensor,
output_all_encoded_layers: bool = True,
output_all_attention_masks: bool = False,
) -> Tuple[
List[Tensor],
List[Tensor],
Tuple[List[Tensor], List[Tensor], List[Tuple[Tensor, Tensor]]],
]:
v_start = 0
t_start = 0
count = 0
all_encoder_layers_t: List[Tensor] = []
all_encoder_layers_v: List[Tensor] = []
all_attention_mask_t: List[Tensor] = []
        all_attention_mask_v: List[Tensor] = []
all_attention_mask_c: List[Tuple[Tensor, Tensor]] = []
batch_size, num_words, t_hidden_size = txt_embedding.size()
# should be num_samples * num_objects * dim
_, num_regions, v_hidden_size = image_embedding.size()
use_co_attention_mask = False
for v_layer_id, t_layer_id in zip(self.v_biattention_id, self.t_biattention_id):
v_end = v_layer_id
t_end = t_layer_id
assert self.fixed_t_layer <= t_end
assert self.fixed_v_layer <= v_end
cur_idx = 0
for cur_layer in self.layer:
if t_start <= cur_idx < self.fixed_t_layer:
txt_embedding, txt_attention_probs = cur_layer.forward_no_grad(
txt_embedding, txt_attention_mask
)
t_start = self.fixed_t_layer
if output_all_attention_masks and "attn" in txt_attention_probs:
all_attention_mask_t.append(txt_attention_probs["attn"])
cur_idx += 1
cur_idx = 0
for cur_layer in self.layer:
if t_start <= cur_idx < t_end:
txt_embedding, txt_attention_probs = cur_layer(
txt_embedding, txt_attention_mask
)
if output_all_attention_masks and "attn" in txt_attention_probs:
all_attention_mask_t.append(txt_attention_probs["attn"])
cur_idx += 1
cur_v_idx = 0
for cur_v_layer in self.v_layer:
if v_start <= cur_v_idx < self.fixed_v_layer:
(
image_embedding,
image_attention_probs,
) = cur_v_layer.forward_no_grad(
image_embedding,
image_attention_mask,
txt_embedding,
txt_attention_mask2,
)
v_start = self.fixed_v_layer
if output_all_attention_masks and "attn" in image_attention_probs:
                        all_attention_mask_v.append(image_attention_probs["attn"])
cur_v_idx += 1
cur_v_idx = 0
for cur_v_layer in self.v_layer:
if v_start <= cur_v_idx < v_end:
image_embedding, image_attention_probs = cur_v_layer(
image_embedding,
image_attention_mask,
txt_embedding,
txt_attention_mask2,
)
if output_all_attention_masks and "attn" in image_attention_probs:
                        all_attention_mask_v.append(image_attention_probs["attn"])
cur_v_idx += 1
if count == 0 and self.in_batch_pairs:
# new batch size is the batch_size ^2
image_embedding = (
image_embedding.unsqueeze(0)
.expand(batch_size, batch_size, num_regions, v_hidden_size)
.contiguous()
.view(batch_size * batch_size, num_regions, v_hidden_size)
)
image_attention_mask = (
image_attention_mask.unsqueeze(0)
.expand(batch_size, batch_size, 1, 1, num_regions)
.contiguous()
.view(batch_size * batch_size, 1, 1, num_regions)
)
txt_embedding = (
txt_embedding.unsqueeze(1)
.expand(batch_size, batch_size, num_words, t_hidden_size)
.contiguous()
.view(batch_size * batch_size, num_words, t_hidden_size)
)
txt_attention_mask = (
txt_attention_mask.unsqueeze(1)
.expand(batch_size, batch_size, 1, 1, num_words)
.contiguous()
.view(batch_size * batch_size, 1, 1, num_words)
)
co_attention_mask = (
co_attention_mask.unsqueeze(1)
.expand(batch_size, batch_size, 1, num_regions, num_words)
.contiguous()
.view(batch_size * batch_size, 1, num_regions, num_words)
)
if count == 0 and self.FAST_MODE:
txt_embedding = txt_embedding.expand(
image_embedding.size(0),
txt_embedding.size(1),
txt_embedding.size(2),
)
txt_attention_mask = txt_attention_mask.expand(
image_embedding.size(0),
txt_attention_mask.size(1),
txt_attention_mask.size(2),
txt_attention_mask.size(3),
)
if self.with_coattention:
cur_c_idx = 0
for cur_c_layer in self.c_layer:
if cur_c_idx == count:
# do the bi attention.
(
image_embedding,
txt_embedding,
co_attention_probs,
) = cur_c_layer(
image_embedding,
image_attention_mask,
txt_embedding,
txt_attention_mask,
co_attention_mask,
use_co_attention_mask,
)
if (
output_all_attention_masks
and "attn1" in co_attention_probs
and "attn2" in co_attention_probs
):
all_attention_mask_c.append(
(
co_attention_probs["attn1"],
co_attention_probs["attn2"],
)
)
cur_c_idx += 1
v_start = v_end
t_start = t_end
count += 1
if output_all_encoded_layers:
all_encoder_layers_t.append(txt_embedding)
all_encoder_layers_v.append(image_embedding)
cur_v_idx = 0
for cur_v_layer in self.v_layer:
if cur_v_idx >= v_start:
image_embedding, image_attention_probs = cur_v_layer(
image_embedding,
image_attention_mask,
txt_embedding,
txt_attention_mask2,
)
if output_all_attention_masks and "attn" in image_attention_probs:
                    all_attention_mask_v.append(image_attention_probs["attn"])
cur_v_idx += 1
cur_idx = 0
for cur_layer in self.layer:
if cur_idx >= t_start:
txt_embedding, txt_attention_probs = cur_layer(
txt_embedding, txt_attention_mask
)
if output_all_attention_masks and "attn" in txt_attention_probs:
all_attention_mask_t.append(txt_attention_probs["attn"])
cur_idx += 1
# add the end part to finish.
if not output_all_encoded_layers:
all_encoder_layers_t.append(txt_embedding)
all_encoder_layers_v.append(image_embedding)
return (
all_encoder_layers_t,
all_encoder_layers_v,
            (all_attention_mask_t, all_attention_mask_v, all_attention_mask_c),
)
class BertTextPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.bi_hidden_size)
self.activation = nn.ReLU()
def forward(self, hidden_states: Tensor) -> Tensor:
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertImagePooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.v_hidden_size, config.bi_hidden_size)
self.activation = nn.ReLU()
def forward(self, hidden_states: Tensor) -> Tensor:
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertImgPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.v_hidden_size, config.v_hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.v_hidden_act
self.LayerNorm = nn.LayerNorm(config.v_hidden_size, eps=1e-12)
def forward(self, hidden_states: Tensor) -> Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertImagePredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = BertImgPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.v_hidden_size, config.v_target_size)
def forward(self, hidden_states: Tensor) -> Tensor:
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
class BertPreTrainingHeads(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = BertLMPredictionHead(config)
self.bi_seq_relationship = nn.Linear(config.bi_hidden_size, 2)
self.imagePredictions = BertImagePredictionHead(config)
self.fusion_method = config.fusion_method
self.dropout = nn.Dropout(0.1)
def forward(
self,
sequence_output_t: Tensor,
sequence_output_v: Tensor,
pooled_output_t: Tensor,
pooled_output_v: Tensor,
) -> Tuple[Tensor, Tensor, Tensor]:
if self.fusion_method == "sum":
pooled_output = self.dropout(pooled_output_t + pooled_output_v)
elif self.fusion_method == "mul":
pooled_output = self.dropout(pooled_output_t * pooled_output_v)
else:
raise AssertionError
prediction_scores_t = self.predictions(sequence_output_t)
seq_relationship_score = self.bi_seq_relationship(pooled_output)
prediction_scores_v = self.imagePredictions(sequence_output_v)
return prediction_scores_t, prediction_scores_v, seq_relationship_score
class BertImageFeatureEmbeddings(nn.Module):
"""Construct the embeddings from image, spatial location (omit now) and
token_type embeddings.
"""
def __init__(self, config):
super().__init__()
self.image_embeddings = nn.Linear(config.v_feature_size, config.v_hidden_size)
self.image_location_embeddings = nn.Linear(5, config.v_hidden_size)
self.LayerNorm = nn.LayerNorm(config.v_hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, image_feature: Tensor, image_location: Tensor) -> Tensor:
img_embeddings = self.image_embeddings(image_feature)
loc_embeddings = self.image_location_embeddings(image_location)
        # TODO: we want to make the padding_idx==0, however, with custom initialization,
# it seems it will have a bias. Let's do masking for now
embeddings = self.LayerNorm(img_embeddings + loc_embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class ViLBERTBase(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
# Replace transformer layers with scriptable JIT layers
replace_with_jit()
        # initialize word embedding
self.embeddings = BertEmbeddings(config)
self.task_specific_tokens = config.task_specific_tokens
        # initialize the vision embedding
self.v_embeddings = BertImageFeatureEmbeddings(config)
self.encoder = BertEncoder(config)
self.t_pooler = BertTextPooler(config)
self.v_pooler = BertImagePooler(config)
self.init_weights()
def forward(
self,
input_txt: Tensor,
image_feature: Tensor,
image_location: Tensor,
token_type_ids: Optional[Tensor] = None,
attention_mask: Optional[Tensor] = None,
image_attention_mask: Optional[Tensor] = None,
co_attention_mask: Optional[Tensor] = None,
task_ids: Optional[Tensor] = None,
output_all_encoded_layers: bool = False,
output_all_attention_masks: bool = False,
) -> Tuple[
Tensor,
Tensor,
Tensor,
Tensor,
Optional[Tuple[List[Tensor], List[Tensor], List[Tuple[Tensor, Tensor]]]],
Optional[List[Tensor]],
Optional[List[Tensor]],
]:
if attention_mask is None:
attention_mask = torch.ones_like(input_txt)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_txt)
if image_attention_mask is None:
image_attention_mask = torch.ones(
image_feature.size(0), image_feature.size(1)
).type_as(input_txt)
all_attention_mask_output: Optional[
Tuple[List[Tensor], List[Tensor], List[Tuple[Tensor, Tensor]]]
] = None
encoded_layers_t_output: Optional[List[Tensor]] = None
encoded_layers_v_output: Optional[List[Tensor]] = None
if self.task_specific_tokens:
# extend the mask
mask_tokens = torch.ones(input_txt.size(0), 1, device=input_txt.device)
attention_mask = torch.cat([mask_tokens, attention_mask], dim=1)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is simpler than the triangular masking of
        # causal attention used in OpenAI GPT; we just need to prepare the
        # broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_image_attention_mask = image_attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask2 = attention_mask.unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
if not torch.jit.is_scripting():
extended_attention_mask = extended_attention_mask.to(
dtype=next(self.parameters()).dtype
) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
if not torch.jit.is_scripting():
extended_attention_mask2 = extended_attention_mask2.to(
dtype=next(self.parameters()).dtype
) # fp16 compatibility
extended_image_attention_mask = extended_image_attention_mask.to(
dtype=next(self.parameters()).dtype
)
extended_image_attention_mask = (1.0 - extended_image_attention_mask) * -10000.0
if co_attention_mask is None:
co_attention_mask = torch.zeros(
input_txt.size(0), image_feature.size(1), input_txt.size(1)
).type_as(extended_image_attention_mask)
extended_co_attention_mask = co_attention_mask.unsqueeze(1)
# extended_co_attention_mask = co_attention_mask.unsqueeze(-1)
extended_co_attention_mask = extended_co_attention_mask * 5.0
if not torch.jit.is_scripting():
extended_co_attention_mask = extended_co_attention_mask.to(
dtype=next(self.parameters()).dtype
)
embedding_output = self.embeddings(input_txt, token_type_ids, task_ids)
v_embedding_output = self.v_embeddings(image_feature, image_location)
encoded_layers_t, encoded_layers_v, all_attention_mask = self.encoder(
embedding_output,
v_embedding_output,
extended_attention_mask,
extended_attention_mask2,
extended_image_attention_mask,
extended_co_attention_mask,
output_all_encoded_layers=output_all_encoded_layers,
output_all_attention_masks=output_all_attention_masks,
)
sequence_output_t = encoded_layers_t[-1]
sequence_output_v = encoded_layers_v[-1]
pooled_output_t = self.t_pooler(sequence_output_t)
pooled_output_v = self.v_pooler(sequence_output_v)
if output_all_attention_masks:
all_attention_mask_output = all_attention_mask
if output_all_encoded_layers:
encoded_layers_t_output = encoded_layers_t
encoded_layers_v_output = encoded_layers_v
return (
sequence_output_t,
sequence_output_v,
pooled_output_t,
pooled_output_v,
all_attention_mask_output,
encoded_layers_t_output,
encoded_layers_v_output,
)
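# Illustrative sketch: the additive mask built in ViLBERTBase.forward turns a 0/1
# padding mask of shape (batch, seq_len) into a broadcastable (batch, 1, 1, seq_len)
# tensor with 0.0 at visible positions and -10000.0 at padded ones, which effectively
# removes padded tokens after the softmax. `_extended_mask_sketch` is a hypothetical
# helper demonstrating this.
def _extended_mask_sketch():
    attention_mask = torch.tensor([[1, 1, 1, 0, 0]])  # one sequence, two padded slots
    extended = attention_mask.unsqueeze(1).unsqueeze(2).float()
    extended = (1.0 - extended) * -10000.0
    return extended  # shape (1, 1, 1, 5) with entries 0.0 and -10000.0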
class ViLBERTForPretraining(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.bert = ViLBERTBase.from_pretrained(
self.config.bert_model_name,
config=BertConfig.from_dict(
OmegaConf.to_container(self.config, resolve=True)
),
cache_dir=os.path.join(get_mmf_cache_dir(), "distributed_{}".format(-1)),
)
self.cls = BertPreTrainingHeads(config)
self.vocab_size = self.config.vocab_size
self.visual_target = config.visual_target
self.num_negative = config.num_negative
self.loss_fct = CrossEntropyLoss(ignore_index=-1)
if self.visual_target == 0:
self.vis_criterion = nn.KLDivLoss(reduction="none")
elif self.visual_target == 1:
self.vis_criterion = nn.MSELoss(reduction="none")
elif self.visual_target == 2:
self.vis_criterion = CrossEntropyLoss()
def init_weights(self):
if self.config.random_initialize is False:
if self.config.bert_model_name is None:
# No pretrained model, init weights
self.bert.init_weights()
self.cls.apply(self.bert._init_weights)
self.tie_weights()
def tie_weights(self):
"""Make sure we are sharing the input and output embeddings.
Export to TorchScript can't handle parameter sharing so we are cloning
them instead.
"""
self._tie_or_clone_weights(
self.cls.predictions.decoder, self.bert.embeddings.word_embeddings
)
def forward(
self,
input_ids: Tensor,
image_feature: Tensor,
image_location: Tensor,
token_type_ids: Tensor,
attention_mask: Tensor,
image_attention_mask: Tensor,
masked_lm_labels: Optional[Tensor] = None,
image_label: Optional[Tensor] = None,
image_target: Optional[Tensor] = None,
output_all_attention_masks: bool = False,
) -> Dict[str, Tensor]:
masked_img_loss: Optional[Tensor] = None
(
sequence_output_t,
sequence_output_v,
pooled_output_t,
pooled_output_v,
attention_weights,
_encoded_layers_t_output,
_encoded_layers_v_output,
) = self.bert(
input_ids,
image_feature,
image_location,
token_type_ids,
attention_mask,
image_attention_mask,
output_all_encoded_layers=False,
output_all_attention_masks=output_all_attention_masks,
)
prediction_scores_t, prediction_scores_v, seq_relationship_score = self.cls(
sequence_output_t, sequence_output_v, pooled_output_t, pooled_output_v
)
output = {}
if not torch.jit.is_scripting() and output_all_attention_masks:
output["attention_weights"] = attention_weights
if image_label is not None and image_target is not None:
if self.visual_target == 1:
img_loss = self.vis_criterion(prediction_scores_v, image_target)
masked_img_loss = torch.sum(
img_loss * torch.eq(image_label, 1).unsqueeze(2).float()
) / max(
torch.sum(
torch.eq(image_label, 1).unsqueeze(2).expand_as(img_loss)
),
1,
)
elif self.visual_target == 0:
img_loss = self.vis_criterion(
F.log_softmax(prediction_scores_v, dim=2), image_target
)
masked_img_loss = torch.sum(
img_loss * torch.eq(image_label, 1).unsqueeze(2).float()
                ) / max(torch.sum(torch.eq(image_label, 1)), 1)
elif self.visual_target == 2:
# generate negative sampled index.
num_across_batch = int(self.num_negative * 0.7)
num_inside_batch = int(self.num_negative * 0.3)
batch_size, num_regions, _ = prediction_scores_v.size()
assert batch_size != 0
# random negative across batches.
row_across_index = torch.ones(
batch_size,
num_regions,
num_across_batch,
dtype=input_ids.dtype,
device=input_ids.device,
).random_(0, batch_size - 1)
col_across_index = torch.ones(
batch_size,
num_regions,
num_across_batch,
dtype=input_ids.dtype,
device=input_ids.device,
).random_(0, num_regions)
for i in range(batch_size - 1):
row_across_index[i][row_across_index[i] == i] = batch_size - 1
final_across_index = row_across_index * num_regions + col_across_index
# random negative inside batches.
row_inside_index = torch.zeros(
batch_size,
num_regions,
num_inside_batch,
dtype=input_ids.dtype,
device=input_ids.device,
)
col_inside_index = torch.ones(
batch_size,
num_regions,
num_inside_batch,
dtype=input_ids.dtype,
device=input_ids.device,
).random_(0, num_regions - 1)
for i in range(batch_size):
row_inside_index[i] = i
for i in range(num_regions - 1):
col_inside_index[:, i, :][col_inside_index[:, i, :] == i] = (
num_regions - 1
)
final_inside_index = row_inside_index * num_regions + col_inside_index
final_index = torch.cat((final_across_index, final_inside_index), dim=2)
# Let's first sample where we need to compute.
predict_v = prediction_scores_v[image_label == 1]
neg_index_v = final_index[image_label == 1]
flat_image_target = image_target.view(batch_size * num_regions, -1)
# we also need to append the target feature at the beginning.
negative_v = flat_image_target[neg_index_v]
positive_v = image_target[image_label == 1]
sample_v = torch.cat((positive_v.unsqueeze(1), negative_v), dim=1)
# calculate the loss.
score = torch.bmm(sample_v, predict_v.unsqueeze(2)).squeeze(2)
masked_img_loss = self.vis_criterion(
score,
torch.zeros(
score.size(0), dtype=input_ids.dtype, device=input_ids.device
),
)
if masked_img_loss is not None:
output["masked_img_loss"] = masked_img_loss.unsqueeze(0)
if masked_lm_labels is not None:
masked_lm_loss = self.loss_fct(
prediction_scores_t.view(-1, self.vocab_size), masked_lm_labels.view(-1)
)
output["masked_lm_loss"] = masked_lm_loss.unsqueeze(0)
# next_sentence_loss = self.loss_fct(
# seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)
# )
# output["next_sentence_loss"] = next_sentence_loss.unsqueeze(0)
return output
class ViLBERTForClassification(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.bert = ViLBERTBase.from_pretrained(
self.config.bert_model_name,
config=BertConfig.from_dict(
OmegaConf.to_container(self.config, resolve=True)
),
cache_dir=os.path.join(get_mmf_cache_dir(), "distributed_{}".format(-1)),
)
self.training_head_type = self.config.training_head_type
self.num_labels = self.config.num_labels
self.fusion_method = config.fusion_method
self.dropout = nn.Dropout(self.config.hidden_dropout_prob)
# Create a copy of config since struct mode won't allow direct overrides
# classifier_config is only needed for initializing the classifier
classifier_config = deepcopy(config)
classifier_config.hidden_size = config.bi_hidden_size
if self.config.training_head_type == "nlvr2":
classifier_config.hidden_size *= 2
self.classifier = nn.Sequential(
BertPredictionHeadTransform(classifier_config),
nn.Linear(classifier_config.hidden_size, self.num_labels),
)
self.init_weights()
def init_weights(self):
if self.config.random_initialize is False:
if self.config.bert_model_name is None:
# No pretrained model, init weights
self.bert.init_weights()
# Classifier needs to be initialized always as it is task specific
self.classifier.apply(self.bert._init_weights)
def forward(
self,
input_ids: Tensor,
image_feature: Tensor,
image_location: Tensor,
token_type_ids: Optional[Tensor] = None,
attention_mask: Optional[Tensor] = None,
image_attention_mask: Optional[Tensor] = None,
masked_lm_labels: Optional[Tensor] = None,
image_label: Optional[Tensor] = None,
image_target: Optional[Tensor] = None,
next_sentence_label: Optional[Tensor] = None,
output_all_attention_masks: bool = False,
) -> Dict[str, Tensor]:
(
sequence_output_t,
sequence_output_v,
pooled_output_t,
pooled_output_v,
attention_weights,
_encoded_layers_t_output,
_encoded_layers_v_output,
) = self.bert(
input_ids,
image_feature,
image_location,
token_type_ids,
attention_mask,
image_attention_mask,
output_all_encoded_layers=False,
output_all_attention_masks=output_all_attention_masks,
)
output = {}
if not torch.jit.is_scripting() and output_all_attention_masks:
output["attention_weights"] = attention_weights
if self.fusion_method == "sum":
pooled_output = self.dropout(pooled_output_t + pooled_output_v)
elif self.fusion_method == "mul":
pooled_output = self.dropout(pooled_output_t * pooled_output_v)
else:
raise AssertionError
if self.training_head_type == "nlvr2":
pooled_output = pooled_output.view(-1, pooled_output.size(1) * 2)
logits = self.classifier(pooled_output)
reshaped_logits = logits.contiguous().view(-1, self.num_labels)
output["scores"] = reshaped_logits
return output
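# Note: for the "nlvr2" head the batch is doubled upstream (the two images of a pair
# are stacked along the batch dimension), so the view(-1, pooled_output.size(1) * 2)
# above concatenates the pooled features of each image pair back into a single example,
# matching the doubled classifier hidden size set in __init__.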
@registry.register_model("vilbert")
class ViLBERT(BaseModel):
def __init__(self, config):
super().__init__(config)
@classmethod
def config_path(cls):
return "configs/models/vilbert/pretrain.yaml"
# Backward compatibility
@classmethod
def format_state_key(cls, key):
return (
key.replace("bert.bert", "model.bert")
.replace("bert.cls", "model.cls")
.replace("bert.classifier", "model.classifier")
)
def build(self):
if self.config.training_head_type == "pretraining":
self.model = ViLBERTForPretraining(self.config)
else:
self.model = ViLBERTForClassification(self.config)
if getattr(self.config, "freeze_base", False):
for p in self.model.bert.parameters():
p.requires_grad = False
def get_image_and_text_features(self, sample_list):
bert_input_ids = sample_list.input_ids
bert_input_mask = sample_list.input_mask
bert_input_type_ids = sample_list.segment_ids
if sample_list.dataset_name == "nlvr2":
bert_input_ids = torch.cat([bert_input_ids, bert_input_ids])
bert_input_mask = torch.cat([bert_input_mask, bert_input_mask])
bert_input_type_ids = torch.cat([bert_input_type_ids, bert_input_type_ids])
# image input
img0 = getattr(sample_list, "img0", {})
image_info = getattr(img0, "image_info_0", {})
image_dim_variable_0 = getattr(image_info, "max_features", None)
image_feature_variable_0 = getattr(img0, "image_feature_0", None)
image_location_variable_0 = getattr(image_info, "bbox", None)
img1 = getattr(sample_list, "img1", {})
image_info = getattr(img1, "image_info_0", {})
image_dim_variable_1 = getattr(image_info, "max_features", None)
image_feature_variable_1 = getattr(img1, "image_feature_0", None)
image_location_variable_1 = getattr(image_info, "bbox", None)
image_feature_variable = torch.cat(
[image_feature_variable_0, image_feature_variable_1]
)
image_location_variable = torch.cat(
[image_location_variable_0, image_location_variable_1]
)
image_dim_variable = torch.cat([image_dim_variable_0, image_dim_variable_1])
image_label_variable = None
image_target_variable = None
else:
image_info = getattr(sample_list, "image_info_0", {})
image_dim_variable = getattr(image_info, "max_features", None)
image_feature_variable = getattr(sample_list, "image_feature_0", None)
image_label_variable = getattr(sample_list, "image_labels", None)
image_location_variable = getattr(image_info, "bbox", None)
cls_prob = getattr(image_info, "cls_prob", None).cpu()
image_target = np.array(cls_prob, dtype=np.float32)
image_target_variable = torch.tensor(
image_target, dtype=torch.float, device=bert_input_ids.device
)
# add image dim variable
image_dim_variable = sample_list["image_feature_0"].new_full(
size=(image_feature_variable.size(0), 1),
fill_value=image_feature_variable.size(1))
return {
"input_ids": bert_input_ids,
"attention_mask": bert_input_mask,
"token_type_ids": bert_input_type_ids,
"image_dim": image_dim_variable,
"image_feature": image_feature_variable,
"image_location": image_location_variable,
"image_target": image_target_variable,
"image_label": image_label_variable,
}
def get_optimizer_parameters(self, config):
return get_optimizer_parameters_for_bert(self.model, config)
def forward(self, sample_list):
# see the difference in sample list for vilbert and visual bert
'''
mmf_predict config=projects/hateful_memes/configs/vilbert/defaults.yaml model=vilbert dataset=hateful_memes run_type=test checkpoint.resume_file=/Users/JQJiang/.cache/torch/mmf/data/models/vilbert.finetuned.hateful_memes.direct/model.pth checkpoint.resume_pretrained=False
:param sample_list:
:return:
'''
params = self.get_image_and_text_features(sample_list)
# pretraining labels
params["masked_lm_labels"] = getattr(sample_list, "lm_label_ids", None)
# is_random_next = getattr(sample_list, "is_correct", None)
# TODO(aps): Fix on dataset side
# params["is_random_next"] = None
# Prepare Mask
if params["image_feature"] is not None and params["image_dim"] is not None:
image_mask = torch.arange(
params["image_feature"].size(-2), device=params["image_feature"].device
).expand(*params["image_feature"].size()[:-1])
if len(params["image_dim"].size()) < len(image_mask.size()):
params["image_dim"] = params["image_dim"].unsqueeze(-1)
assert len(params["image_dim"].size()) == len(image_mask.size())
image_mask = image_mask < params["image_dim"]
params["image_attention_mask"] = image_mask.long()
else:
params["image_attention_mask"] = None
params.pop("image_dim")
output_dict = self.model(
params["input_ids"],
params["image_feature"],
params["image_location"],
params["token_type_ids"],
params["attention_mask"],
params["image_attention_mask"],
params["masked_lm_labels"],
params["image_label"],
params["image_target"],
)
if self.config.training_head_type == "pretraining":
loss_key = "{}/{}".format(
sample_list.dataset_name, sample_list.dataset_type
)
output_dict["losses"] = {}
output_dict["losses"][loss_key + "/masked_lm_loss"] = output_dict.pop(
"masked_lm_loss"
)
output_dict["losses"][loss_key + "/masked_img_loss"] = output_dict.pop(
"masked_img_loss"
)
return output_dict
@classmethod
def from_pretrained(cls, model_name, *args, **kwargs):
model = super().from_pretrained(model_name, *args, **kwargs)
config = load_pretrained_model(model_name)["full_config"]
OmegaConf.set_struct(config, True)
return FeatureModelInterface(model, config, "vilbert")
| [
"torch.nn.Linear",
"torch.cat",
"torch.ones",
"torch.jit.is_scripting",
"torch.nn.CrossEntropyLoss",
"torch.nn.LayerNorm",
"torch.tensor",
"torch.nn.KLDivLoss",
"torch.zeros_like",
"torch.zeros",
"torch.nn.functional.log_softmax",
"torch.nn.ReLU",
"torch.nn.functional.softmax",
"torch.matmul",
"torch.nn.Dropout",
"torch.nn.MSELoss",
"torch.eq",
"torch.no_grad",
"torch.ones_like"
] | 1.6.0 | junqi-jiang/mmf | c8f47a23b85a87d14616c2f53e81693a25ea929a |
1.4 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import typer
import time
import gym
import torch
import collections
import numpy as np
import pandas as pd
from pathlib import Path
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import pygame
from pygame.locals import VIDEORESIZE
import gym.wrappers as gymwr
from models import wrappers
from models import dqn_model
class ModelAgent(object):
    """Acts according to a loaded PyTorch model."""
def __init__(self, env, model_path):
# Load the model and set it to inference.
self.model = dqn_model.DQN(env.observation_space.shape,
env.action_space.n)
self.model.load_state_dict(torch.load(model_path,
map_location=torch.device("cpu")))
self.model.eval()
def act(self, observation):
obs = torch.tensor(np.array([observation], copy=False))
q_values = self.model(obs).data.numpy()[0]
action = np.argmax(q_values)
return action
def value(self, observation, action):
obs = torch.tensor(np.array([observation], copy=False))
q_values = self.model(obs).data.numpy()[0]
return q_values[action]
class ModelEmbeddings(object):
"""Returns the values from the last hidden layer of a model."""
def __init__(self, env, model_path):
# Load the model and set it to inference.
self.model = dqn_model.DQN_emb(env.observation_space.shape,
env.action_space.n)
state_dict = torch.load(model_path, map_location=torch.device("cpu"))
del state_dict["fc.2.weight"]
del state_dict["fc.2.bias"]
self.model.load_state_dict(state_dict)
self.model.eval()
def embeddings(self, observation):
obs = torch.tensor(np.array([observation], copy=False))
embeddings = self.model(obs).data.numpy()[0]
return embeddings
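# Illustrative usage sketch: both helper classes expect a wrapped Atari env and a saved
# DQN state dict; on CPU the agent simply takes the argmax-Q action for the current
# observation. `_agent_sketch` is a hypothetical helper; the game name and model path
# reuse the defaults of play_gym_model below.
def _agent_sketch(game="PongNoFrameskip-v4",
                  model_path="models/DQL-PongNoFrameskip-v4-mr18.pt"):
    env = wrappers.make_env(game)
    agent = ModelAgent(env, model_path)
    obs = env.reset()
    action = agent.act(obs)              # greedy action
    q_value = agent.value(obs, action)   # its estimated Q-value
    return action, q_value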
def play_gym_model(
game: str = typer.Argument("PongNoFrameskip-v4"),
model: Path = typer.Argument("models/DQL-PongNoFrameskip-v4-mr18.pt"),
episodes: int = typer.Option(
1,
show_default=True,
help="Number of runs of the environment to simulate.",
),
fps: int = typer.Option(
30,
show_default=True,
help="Frames per second (approximately)."
),
verbose: bool = typer.Option(
False,
show_default=True,
help="Print action, reward and observation at every step.",
),
plot_tsne: bool = typer.Option(
False,
show_default=True,
help="Record last layer embeddings for a tSNE representation."
),
monitor: bool = typer.Option(
False,
show_default=True,
help="Activate a monitor to record a video of the results."
),
logger: str = typer.Option(
"WARN",
show_default=True,
help="Select logger option, from INFO, WARN or DEBUG.",
),
outdir: Path = typer.Option(
Path.cwd()/"reports/videos",
        help=("Output directory for the results of the monitor. " +
              "[default: ./reports/videos]"),
),
):
"""Play an OpenAI Gym game using a DQL agent previously trained.
This module creates an OpenAI Gym environment and executes actions
as dictated from a learned policy. The game is rendered on screen,
and the results can be recorded. The default game is "PongNoFrameskip-v4".
"""
typer.echo(f"Playing {game} with a trained agent.")
# Set the logger level
if logger == "INFO":
gym.logger.set_level(gym.logger.INFO)
elif logger == "DEBUG":
gym.logger.set_level(gym.logger.DEBUG)
elif logger == "WARN":
gym.logger.set_level(gym.logger.WARN)
# Make and wrap the environment
env = wrappers.make_env(game)
if monitor:
env = gymwr.Monitor(env, directory=outdir, force=True)
# Set up the agent
agent = ModelAgent(env, model)
if plot_tsne:
embnet = ModelEmbeddings(env, model)
embeddings = []
actions = []
rewards = []
q_values = []
# Make it so you can zoom on the window
rendered = env.render(mode='rgb_array')
zoom = 1.0
    video_size = [rendered.shape[1], rendered.shape[0]]
video_size = int(video_size[0] * zoom), int(video_size[1] * zoom)
screen = pygame.display.set_mode(video_size)
done = False
for ep in range(episodes):
typer.echo(f"Starting episode {ep}.")
total_reward = 0
state = env.reset()
state_count = 0
action_counter = collections.Counter()
while True:
start_ts = time.time()
if not monitor: env.render()
action = agent.act(state)
action_counter[action] += 1
new_state, reward, done, _ = env.step(action)
total_reward += reward
if plot_tsne:
embeddings.append(embnet.embeddings(state))
actions.append(action)
rewards.append(reward)
q_values.append(agent.value(state, action))
if verbose:
typer.echo(f"{action} {reward} {state}")
if done:
typer.echo(f"Game reached end-state in frame {state_count}, "
f"achieving a total reward of {total_reward}.")
break
# Stop for a bit to make gameplay slower in the renderer
delta = 1/fps - (time.time() - start_ts)
if (delta > 0) and not monitor:
time.sleep(delta)
state_count += 1
state = new_state
env.env.close()
if plot_tsne:
import matplotlib as mpl
typer.echo("Performing t-SNE embedding.")
X_tsne = TSNE(n_components=2).fit_transform(np.array(embeddings))
data = pd.DataFrame(data = {"tsne0": X_tsne[:, 0],
"tsne1": X_tsne[:, 1],
"action": actions,
"reward": rewards,
"q_value": q_values,
})
meanings = ['NOOP', 'FIRE', 'RIGHT', 'LEFT', 'RIGHTFIRE', 'LEFTFIRE']
markers = ["o", "x", "^", "v", "^", "v"]
data["Action"] = data["action"].apply(lambda x: meanings[x])
        cmap = mpl.cm.get_cmap("viridis")
norm = mpl.colors.Normalize(vmin=0, vmax=max(data["q_value"]))
scmap = mpl.cm.ScalarMappable(norm, cmap)
fig, ax = plt.subplots(figsize=(10, 10))
ax.set_title("Two-dimensional t-SNE embedding of the representations\n"
"in the last hidden layer assigned by DQN to game states")
for marker, action in zip(markers, meanings):
a_data = data[data["Action"]==action]
plt.scatter(a_data["tsne0"], a_data["tsne1"], c=a_data["q_value"],
cmap="viridis", norm=norm, alpha=0.7, s=50.0,
marker=marker)
plt.savefig(Path.cwd()/"reports/figures/tsne_q_values.png")
plt.close()
if __name__ == "__main__":
typer.run(play_gym_model)
| [
"torch.device"
] | 1.4.0 | miguel-bm/ai6rl | cd751b734e485784e0d42bf4af7068860635ea91 |
1.7 | import pytest
import torch
from flash.core.serve.types import Label
def test_path(session_global_datadir):
label = Label(path=str(session_global_datadir / "imagenet_labels.txt"))
assert label.deserialize("chickadee") == torch.tensor(19)
assert label.serialize(torch.tensor(19)) == "chickadee"
def test_list():
label = Label(classes=["classA", "classB"])
assert label.deserialize("classA") == torch.tensor(0)
def test_dict():
label = Label(classes={56: "classA", 48: "classB"})
assert label.deserialize("classA") == torch.tensor(56)
with pytest.raises(TypeError):
Label(classes={"wrongtype": "classA"})
def test_wrong_type():
with pytest.raises(TypeError):
Label(classes=set())
with pytest.raises(ValueError):
Label(classes=None)
| [
"torch.tensor"
] | 1.7.1 | Actis92/lightning-flash | 49972268cfc0f95f1bd2b8fbf25036970cc44b59 |
0.3 | import os
import torch
import datasets
import translation_models.model as tmm
import translation_models.help_fun as transl_hf
import onmt
import model_manager
import quantization
import copy
import pickle
import functools
import quantization.help_functions as qhf
import helpers.functions as mhf
cuda_devices = os.environ['CUDA_VISIBLE_DEVICES'].split(',')
print('CUDA_VISIBLE_DEVICES: {} for a total of {}'.format(cuda_devices, len(cuda_devices)))
datasets.BASE_DATA_FOLDER = '...'
SAVED_MODELS_FOLDER = '...'
USE_CUDA = torch.cuda.is_available()
NUM_GPUS = len(cuda_devices)
TRAIN_TEACHER_MODEL=False
TRAIN_SMALLER_MODEL=False
TRAIN_SEQUENCE_DISTILLED=False
TRAIN_WORD_DISTILLED=False
TRAIN_QUANTIZED_DISTILLED=False
TRAIN_DIFFERENTIABLE_QUANTIZATION=False
CREATE_DISTILLATION_DATASET=False
COMPUTE_BLEU_MODELS = False
CHECK_PM_QUANTIZATION = True
COMPUTE_WORD_PERCENTAGE_SIMILARITY = True
try:
os.mkdir(datasets.BASE_DATA_FOLDER)
except:pass
try:
os.mkdir(SAVED_MODELS_FOLDER)
except:pass
epochsToTrainOnmtIntegDataset = 15
onmtManager = model_manager.ModelManager('model_manager_integ_dataset.tst',
'model_manager', create_new_model_manager=False)
for x in onmtManager.list_models():
if onmtManager.get_num_training_runs(x) > 0:
print(x, onmtManager.load_metadata(x)[1]['perplexity'][-1])
integ_dataset_saved_models_folder = os.path.join(SAVED_MODELS_FOLDER, 'integ_dataset')
try:
os.mkdir(integ_dataset_saved_models_folder)
except:pass
#load the data
batch_size = 64 * NUM_GPUS
if batch_size % NUM_GPUS != 0:
raise ValueError('Batch size: {} must be a multiple of the number of gpus:{}'.format(batch_size, NUM_GPUS))
transl_dataset = datasets.onmt_integ_dataset(pin_memory=True)
train_loader, test_loader = transl_dataset.getTrainLoader(batch_size), transl_dataset.getTestLoader(batch_size)
#Teacher model
teacherOptions = copy.deepcopy(onmt.standard_options.stdOptions)
#it only matters in the creation of the distillation dataset
teacherOptions['rnn_size'] = 500
teacherOptions['epochs'] = epochsToTrainOnmtIntegDataset
teacherModel_name = 'integ_dataset_teacherModel'
teacherModelPath = os.path.join(integ_dataset_saved_models_folder, teacherModel_name)
teacherModel = tmm.create_model(transl_dataset.fields, options=teacherOptions)
if USE_CUDA: teacherModel = teacherModel.cuda()
if teacherModel_name not in onmtManager.saved_models:
onmtManager.add_new_model(teacherModel_name, teacherModelPath,
arguments_creator_function=teacherOptions)
if TRAIN_TEACHER_MODEL:
onmtManager.train_model(teacherModel, model_name=teacherModel_name,
train_function=tmm.train_model,
arguments_train_function={'options':teacherOptions},
train_loader=train_loader, test_loader=test_loader)
if onmtManager.get_num_training_runs(teacherModel_name) > 0:
teacherModel.load_state_dict(onmtManager.load_model_state_dict(teacherModel_name))
#now create a distillation dataset
standardTranslateOptions = onmt.standard_options.standardTranslationOptions
create_distilled_dataset_options = copy.deepcopy(teacherOptions)
folder_distillation_dataset = os.path.join(transl_dataset.dataFolder, 'distilled_dataset_' + teacherModel_name)
if CREATE_DISTILLATION_DATASET:
print('Creating distillation dataset from scratch')
transl_hf.create_distillation_dataset(teacherModel, create_distilled_dataset_options, standardTranslateOptions,
transl_dataset, folder_distillation_dataset)
print('Distillation dataset created')
try:
distilled_dataset = datasets.translation_datasets.TranslationDataset(folder_distillation_dataset, src_language='de',
tgt_language='en', pin_memory=True)
train_distilled_loader, test_distilled_loader = distilled_dataset.getTrainLoader(batch_size), distilled_dataset.getTestLoader(batch_size)
print('Distillation dataset loaded')
except:
print('Problems loading the distillation dataset')
#just so they don't raise errors..
distilled_dataset = transl_dataset
train_distilled_loader = train_loader
test_distilled_loader = test_loader
# quick last minute experiment of distill vs normal loss
# smallerOptions = copy.deepcopy(onmt.standard_options.stdOptions)
# #if not specified, it was trained with 2 layers (2 for encoder and 2 for decoder, that is) with rnn size of 200
# smallerOptions['batch_size'] = batch_size
# smallerOptions['rnn_size'] = 512
# smallerOptions['layers'] = 1
# smallerOptions['epochs'] = epochsToTrainOnmtIntegDataset
# for numBit in [4]:
# model_name = 'integ_dataset_smallerModel_{}rnn_size1_layer_quantized{}bits'.format(512, numBit)
# smallerModelPath = os.path.join(integ_dataset_saved_models_folder, model_name)
# smallerModel = tmm.create_model(transl_dataset.fields, options=smallerOptions)
# if USE_CUDA: smallerModel = smallerModel.cuda()
# if model_name not in onmtManager.saved_models:
# onmtManager.add_new_model(model_name, smallerModelPath,
# arguments_creator_function=smallerOptions)
# onmtManager.train_model(smallerModel, model_name=model_name,
# train_function=tmm.train_model,
# arguments_train_function={'options':smallerOptions,
# 'quantizeWeights': True,
# 'numBits':numBit,
# 'bucket_size':256},
# train_loader=train_loader, test_loader=test_loader)
# if onmtManager.get_num_training_runs(model_name) > 0:
# smallerModel.load_state_dict(onmtManager.load_model_state_dict(model_name))
# print('finished training, computing BLEU')
# bleu = transl_hf.get_bleu_model(smallerModel, transl_dataset, smallerOptions, standardTranslateOptions)
# bleu='not computed'
# ppl = tmm.evaluate_model(smallerModel, test_loader).ppl()
# print('BLEU is : {}'.format(bleu))
del teacherModel
rnn_sizes = [128, 256, 512]
numBits = [2,4,8]
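# Bit widths evaluated below for PM quantization of the distilled models and
# for loading the differentiable quantization points (8 bits is skipped there).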
# for rnn_size in rnn_sizes:
# #smaller model
# smallerOptions = copy.deepcopy(onmt.standard_options.stdOptions)
# #if not specified, it was trained with 2 layers (2 for encoder and 2 for decoder, that is) with rnn size of 200
# smallerOptions['batch_size'] = batch_size
# smallerOptions['rnn_size'] = rnn_size
# smallerOptions['layers'] = 1
# smallerOptions['epochs'] = epochsToTrainOnmtIntegDataset
# model_name = 'integ_dataset_smallerModel_{}rnn_size1_layer'.format(rnn_size)
# smallerModelPath = os.path.join(integ_dataset_saved_models_folder, model_name)
# smallerModel = tmm.create_model(transl_dataset.fields, options=smallerOptions)
# if USE_CUDA: smallerModel = smallerModel.cuda()
# if model_name not in onmtManager.saved_models:
# onmtManager.add_new_model(model_name, smallerModelPath,
# arguments_creator_function=smallerOptions)
# if TRAIN_SMALLER_MODEL:
# onmtManager.train_model(smallerModel, model_name=model_name,
# train_function=tmm.train_model,
# arguments_train_function={'options':smallerOptions},
# train_loader=train_loader, test_loader=test_loader)
# if onmtManager.get_num_training_runs(model_name) > 0:
# smallerModel.load_state_dict(onmtManager.load_model_state_dict(model_name))
# del smallerModel
#
# #Distilled model with word-level knowledge distillation
# teacherModel = tmm.create_model(transl_dataset.fields, options=teacherOptions)
# if USE_CUDA: teacherModel = teacherModel.cuda()
# teacherModel.load_state_dict(onmtManager.load_model_state_dict(teacherModel_name))
#
# distilledOptions = copy.deepcopy(smallerOptions)
# distilled_model_name = 'integ_dataset_distilledModel_word_level_{}rnn_size1_layer'.format(rnn_size)
# distilled_model_word_level = tmm.create_model(transl_dataset.fields, options=distilledOptions)
# if USE_CUDA: distilled_model_word_level = distilled_model_word_level.cuda()
# distilledModelPath = os.path.join(integ_dataset_saved_models_folder, distilled_model_name)
# if distilled_model_name not in onmtManager.saved_models:
# onmtManager.add_new_model(distilled_model_name, distilledModelPath,
# arguments_creator_function=distilledOptions)
# if TRAIN_WORD_DISTILLED:
# onmtManager.train_model(distilled_model_word_level, model_name=distilled_model_name,
# train_function=tmm.train_model,
# arguments_train_function={'options':distilledOptions,
# 'teacher_model': teacherModel,
# 'use_distillation_loss':True},
# train_loader=train_loader, test_loader=test_loader)
# if onmtManager.get_num_training_runs(distilled_model_name) > 0:
# distilled_model_word_level.load_state_dict(onmtManager.load_model_state_dict(distilled_model_name))
# del distilled_model_word_level
#
# #Quantized word level distillation
# for numBit in numBits:
# distilled_model_name_quantized = 'integ_dataset_distilledModel_word_level_quantized{}bits{}rnn_size1_layer'.format(
# numBit, rnn_size)
# distilled_model_word_level = tmm.create_model(transl_dataset.fields, options=distilledOptions)
# if USE_CUDA: distilled_model_word_level = distilled_model_word_level.cuda()
# distilledModelPath = os.path.join(integ_dataset_saved_models_folder, distilled_model_name_quantized)
# if distilled_model_name_quantized not in onmtManager.saved_models:
# onmtManager.add_new_model(distilled_model_name_quantized, distilledModelPath,
# arguments_creator_function=distilledOptions)
# if TRAIN_WORD_DISTILLED and TRAIN_QUANTIZED_DISTILLED:
# onmtManager.train_model(distilled_model_word_level, model_name=distilled_model_name_quantized,
# train_function=tmm.train_model,
# arguments_train_function={'options':distilledOptions,
# 'teacher_model': teacherModel,
# 'use_distillation_loss':True,
# 'quantizeWeights':True,
# 'numBits':numBit,
# 'bucket_size':256},
# train_loader=train_loader, test_loader=test_loader)
# if onmtManager.get_num_training_runs(distilled_model_name_quantized) > 0:
# distilled_model_word_level.load_state_dict(onmtManager.load_model_state_dict(distilled_model_name_quantized))
# del distilled_model_word_level
#
# #optimize quantization points
# if numBit == 8:#but no 8 bits with differentiable quantization
# continue
#
# optQuanPointOptions = copy.deepcopy(onmt.standard_options.stdOptions)
# optQuanPointOptions['learning_rate'] = 1e-4
# optQuanPointOptions['epochs'] = 3
# learning_rate_str = str(optQuanPointOptions['learning_rate'])
# save_path = onmtManager.get_model_base_path(distilled_model_name) + \
# 'quant_points_{}bit_bucket_size256'.format(numBit)
# if TRAIN_DIFFERENTIABLE_QUANTIZATION:
# distilledModel = tmm.create_model(transl_dataset.fields, options=distilledOptions)
# distilledModel.load_state_dict(onmtManager.load_model_state_dict(distilled_model_name))
# if USE_CUDA: distilledModel = distilledModel.cuda()
# points, infoDict = tmm.optimize_quantization_points(distilledModel, train_loader, test_loader,
# optQuanPointOptions, numPointsPerTensor=2**numBit,
# bucket_size=256, assignBitsAutomatically=True,
# use_distillation_loss=True)
# quantization_points = [x.data.view(1, -1).cpu().numpy().tolist()[0] for x in points]
# with open(save_path, 'wb') as p:
# pickle.dump((quantization_points, infoDict), p)
#print bleu for the models
example_translations=False
file_results = 'results_file_BLEU_models'
if COMPUTE_BLEU_MODELS or COMPUTE_WORD_PERCENTAGE_SIMILARITY:
with open(file_results, 'a') as fr:
fr.write('\n\n== New Testing Run == \n\n')
if COMPUTE_WORD_PERCENTAGE_SIMILARITY:
#we need the ref file with the teacher
teacherModelOptions = onmtManager.load_metadata('integ_dataset_teacherModel', 0)[0]
    for key, val in teacherModelOptions.items(): #remedying an old bug in the save_metadata function
if val == 'None':
teacherModelOptions[key] = None
teacherModel = tmm.create_model(transl_dataset.fields, options=teacherModelOptions)
if USE_CUDA: teacherModel = teacherModel.cuda()
teacherModel.load_state_dict(onmtManager.load_model_state_dict('integ_dataset_teacherModel', 1))
pathTeacherTranslation = transl_hf.get_translation_file_model(teacherModel, transl_dataset,
teacherModelOptions, standardTranslateOptions)
for x in onmtManager.list_models():
if onmtManager.get_num_training_runs(x) == 0:
continue
modelOptions = onmtManager.load_metadata(x, 0)[0]
    for key, val in modelOptions.items(): #remedying an old bug in the save_metadata function
if val == 'None':
modelOptions[key] = None
if 'distilled' in x and 'word' not in x:
dataset = distilled_dataset
else:
dataset = transl_dataset
model = tmm.create_model(dataset.fields, options=modelOptions)
if USE_CUDA: model = model.cuda()
model.load_state_dict(onmtManager.load_model_state_dict(x, 1))
if example_translations:
print('Example of translation for model: "{}"'.format(x))
num_examples = 5
linesToTranslate, translated_lines, referenceLines = transl_hf.get_translation_examples(model,
dataset,
num_examples,
modelOptions,
standardTranslateOptions,
shuffle_examples=False)
print('Original Sentences == Translation == Ref Translation')
print('\n'.join(' == '.join(x) for x in zip(linesToTranslate, translated_lines, referenceLines)))
if COMPUTE_BLEU_MODELS or COMPUTE_WORD_PERCENTAGE_SIMILARITY:
if COMPUTE_WORD_PERCENTAGE_SIMILARITY is False or (COMPUTE_WORD_PERCENTAGE_SIMILARITY and x != 'integ_dataset_teacherModel'):
file_translation_model = transl_hf.get_translation_file_model(model, dataset,
modelOptions, standardTranslateOptions)
else:
file_translation_model = pathTeacherTranslation
if COMPUTE_BLEU_MODELS:
bleu = transl_hf.get_bleu_moses(file_translation_model, dataset.testFilesPath[1], file_input=True)
else:
bleu = 'Not computed'
if COMPUTE_WORD_PERCENTAGE_SIMILARITY and x != 'integ_dataset_teacherModel':
percentage_word_similarity = transl_hf.compute_percentage_word_similarity(pathTeacherTranslation,
file_translation_model,
file_input=True)
else:
percentage_word_similarity = 'not computed'
else:
bleu = 'Not computed'
percentage_word_similarity = 'not computed'
perplexity = onmtManager.load_metadata(x,1)[1]['perplexity'][-1]
str_to_save = 'Model "{}" ==> Perplexity: {}, BLEU: {} Percentage word similarity with teacher: {}'.format(x,
perplexity,
bleu,
percentage_word_similarity)
if COMPUTE_BLEU_MODELS or COMPUTE_WORD_PERCENTAGE_SIMILARITY:
with open(file_results, 'a') as fr:
fr.write(str_to_save + '\n')
print(str_to_save)
curr_num_bit = onmtManager.load_metadata(x)[0].get('numBits', None)
if curr_num_bit is not None:
quant_fun = functools.partial(quantization.uniformQuantization, s=2**curr_num_bit, bucket_size=256)
actual_bit_huffmman = qhf.get_huffman_encoding_mean_bit_length(model.parameters(), quant_fun,
'uniform', s=2**curr_num_bit)
print('Effective bit Huffman: {} - Size reduction: {}'.format(actual_bit_huffmman,
mhf.get_size_reduction(actual_bit_huffmman, bucket_size=256)))
if CHECK_PM_QUANTIZATION:
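        # PM quantization check: uniformly quantize the weights of the already
        # trained word-level distilled models in place (no retraining) and
        # re-evaluate perplexity and, optionally, BLEU/word similarity per bit width.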
if 'distilledModel_word_level' in x and 'quantized' not in x:
for numBit in numBits:
model.load_state_dict(onmtManager.load_model_state_dict(x, 1))
for p in model.parameters():
p.data = quantization.uniformQuantization(p.data, s=2**numBit, type_of_scaling='linear',
bucket_size=256)[0]
perplexity = tmm.evaluate_model(model, test_loader).ppl()
if COMPUTE_BLEU_MODELS or COMPUTE_WORD_PERCENTAGE_SIMILARITY:
file_translation_model = transl_hf.get_translation_file_model(model, dataset,
modelOptions,
standardTranslateOptions)
if COMPUTE_BLEU_MODELS:
bleu = transl_hf.get_bleu_moses(file_translation_model, dataset.testFilesPath[1],
file_input=True)
else:
bleu = 'Not computed'
if COMPUTE_WORD_PERCENTAGE_SIMILARITY:
percentage_word_similarity = transl_hf.compute_percentage_word_similarity(
pathTeacherTranslation,
file_translation_model,
file_input=True)
else:
percentage_word_similarity = 'not computed'
else:
bleu = 'Not computed'
percentage_word_similarity = 'not computed'
str_to_save = 'PM quantization of model "{}" with "{}" bits and bucket size 256: Perplexity : {}, BLEU: {}'.format(
x, numBit, perplexity, bleu)
str_to_save += 'Percentage word similarity with teacher:{}'.format(percentage_word_similarity)
if COMPUTE_BLEU_MODELS or COMPUTE_WORD_PERCENTAGE_SIMILARITY:
with open(file_results, 'a') as fr:
fr.write(str_to_save + '\n')
print(str_to_save)
quant_fun = functools.partial(quantization.uniformQuantization, s=2**numBit, bucket_size=256)
actual_bit_huffmman = qhf.get_huffman_encoding_mean_bit_length(model.parameters(), quant_fun,
'uniform', s=2**numBit)
print('Effective bit Huffman: {} - Size reduction: {}'.format(actual_bit_huffmman,
mhf.get_size_reduction(
actual_bit_huffmman,
bucket_size=256)))
#now for the models trained with the differentiable quantization algorithm
list_distilled_models = ['integ_dataset_distilledModel_word_level_{}rnn_size1_layer'.format(x)
for x in rnn_sizes]
optQuanPointOptions = copy.deepcopy(onmt.standard_options.stdOptions)
for idx_model_distilled, distilled_model_name_to_quantize in enumerate(list_distilled_models):
modelOptions = onmtManager.load_metadata(distilled_model_name_to_quantize, 0)[0]
    for key, val in modelOptions.items(): # remedying an old bug in the save_metadata function
if val == 'None':
modelOptions[key] = None
dataset = transl_dataset #since we don't use sequence level distillation
for numBit in numBits:
if numBit == 8: continue
save_path = onmtManager.get_model_base_path(distilled_model_name_to_quantize) + \
'quant_points_{}bit_bucket_size256'.format(numBit)
with open(save_path, 'rb') as p:
quantization_points, infoDict = pickle.load(p)
distilledModel = tmm.create_model(dataset.fields, options=modelOptions)
distilledModel.load_state_dict(onmtManager.load_model_state_dict(distilled_model_name_to_quantize))
if USE_CUDA: distilledModel = distilledModel.cuda()
for idx, p in enumerate(distilledModel.parameters()):
p.data = quantization.nonUniformQuantization(p.data, quantization_points[idx], bucket_size=256)[0]
reported_perplexity = infoDict['perplexity'][-1]
perplexity = tmm.evaluate_model(distilledModel, test_loader).ppl()
if COMPUTE_BLEU_MODELS or COMPUTE_WORD_PERCENTAGE_SIMILARITY:
file_translation_model = transl_hf.get_translation_file_model(distilledModel, dataset,
modelOptions,
standardTranslateOptions)
if COMPUTE_BLEU_MODELS:
bleu = transl_hf.get_bleu_moses(file_translation_model, dataset.testFilesPath[1],
file_input=True)
else:
bleu = 'Not computed'
if COMPUTE_WORD_PERCENTAGE_SIMILARITY:
percentage_word_similarity = transl_hf.compute_percentage_word_similarity(
pathTeacherTranslation,
file_translation_model,
file_input=True)
else:
percentage_word_similarity = 'not computed'
else:
bleu = 'Not computed'
percentage_word_similarity = 'not computed'
str_to_save = 'Model "{}" ==> Reported perplexity : {}, Actual perplexity: {}, BLEU: {}'.format(
distilled_model_name_to_quantize + 'quant_points_{}bit_bucket_size256'.format(numBit),
reported_perplexity, perplexity, bleu)
str_to_save += 'Percentage word similarity with teacher:{}'.format(percentage_word_similarity)
if COMPUTE_BLEU_MODELS or COMPUTE_WORD_PERCENTAGE_SIMILARITY:
with open(file_results, 'a') as fr:
fr.write(str_to_save + '\n')
print(str_to_save)
quantization_functions = [functools.partial(quantization.nonUniformQuantization,
listQuantizationPoints=qp,
bucket_size=256) for qp in quantization_points]
actual_bit_huffmman = qhf.get_huffman_encoding_mean_bit_length(distilledModel.parameters(),
quantization_functions,
'nonUniform')
print('Effective bit Huffman: {} - Size reduction: {}'.format(actual_bit_huffmman,
mhf.get_size_reduction(
actual_bit_huffmman,
bucket_size=256)))
try:
os.remove(pathTeacherTranslation)
except:pass
try:
os.remove(file_translation_model)
except:pass
| [
"torch.cuda.is_available"
] | 0.3.1 | lijian10086/quantized_distillation | bb500b7ae48a3f6751d6434126de9845b58d2d65 |
0.4 | ###########################################################################
# Created by: Hang Zhang
# Email: [email protected]
# Copyright (c) 2017
###########################################################################
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import upsample
from torch.nn.parallel.data_parallel import DataParallel
from torch.nn.parallel.parallel_apply import parallel_apply
from torch.nn.parallel.scatter_gather import scatter
from .. import dilated as resnet
from ..utils import batch_pix_accuracy, batch_intersection_union
up_kwargs = {'mode': 'bilinear', 'align_corners': True}
__all__ = ['BaseNet', 'MultiEvalModule']
class BaseNet(nn.Module):
def __init__(self, nclass, backbone, aux, se_loss, dilated=True, norm_layer=None,
mean=[.485, .456, .406], std=[.229, .224, .225], root='~/.encoding/models'):
super(BaseNet, self).__init__()
self.nclass = nclass
self.aux = aux
self.se_loss = se_loss
self.mean = mean
self.std = std
# copying modules from pretrained models
if backbone == 'resnet50':
self.pretrained = resnet.resnet50(pretrained=True, dilated=dilated,
norm_layer=norm_layer, root=root)
elif backbone == 'resnet101':
self.pretrained = resnet.resnet101(pretrained=True, dilated=dilated,
norm_layer=norm_layer, root=root)
elif backbone == 'resnet152':
self.pretrained = resnet.resnet152(pretrained=True, dilated=dilated,
norm_layer=norm_layer, root=root)
else:
raise RuntimeError('unknown backbone: {}'.format(backbone))
# bilinear upsample options
self._up_kwargs = up_kwargs
def base_forward(self, x):
x = self.pretrained.conv1(x)
x = self.pretrained.bn1(x)
x = self.pretrained.relu(x)
x = self.pretrained.maxpool(x)
c1 = self.pretrained.layer1(x)
c2 = self.pretrained.layer2(c1)
c3 = self.pretrained.layer3(c2)
c4 = self.pretrained.layer4(c3)
return c1, c2, c3, c4
def evaluate(self, x, target=None):
pred = self.forward(x)
if isinstance(pred, (tuple, list)):
pred = pred[0]
if target is None:
return pred
correct, labeled = batch_pix_accuracy(pred.data, target.data)
inter, union = batch_intersection_union(pred.data, target.data, self.nclass)
return correct, labeled, inter, union
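# Minimal sketch (illustrative, not part of this file) of how a segmentation
# head typically builds on BaseNet: run the backbone via base_forward, score
# the deepest feature map c4 (2048 channels for these ResNet backbones), and
# upsample the per-pixel class scores back to the input resolution.
class _ExampleHead(BaseNet):
    def __init__(self, nclass, backbone, aux=False, se_loss=False,
                 norm_layer=nn.BatchNorm2d, **kwargs):
        super(_ExampleHead, self).__init__(nclass, backbone, aux, se_loss,
                                           norm_layer=norm_layer, **kwargs)
        self.head = nn.Conv2d(2048, nclass, kernel_size=1)
    def forward(self, x):
        _, _, h, w = x.size()
        _, _, _, c4 = self.base_forward(x)
        out = self.head(c4)
        return F.upsample(out, (h, w), **self._up_kwargs)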
class MultiEvalModule(DataParallel):
"""Multi-size Segmentation Eavluator"""
def __init__(self, module, nclass, device_ids=None,
base_size=520, crop_size=480, flip=True,
scales=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75]):
super(MultiEvalModule, self).__init__(module, device_ids)
self.nclass = nclass
self.base_size = base_size
self.crop_size = crop_size
self.scales = scales
self.flip = flip
def parallel_forward(self, inputs, **kwargs):
"""Multi-GPU Mult-size Evaluation
Args:
inputs: list of Tensors
"""
inputs = [(input.unsqueeze(0).cuda(device),) for input, device in zip(inputs, self.device_ids)]
replicas = self.replicate(self, self.device_ids[:len(inputs)])
        kwargs = scatter(kwargs, self.device_ids[:len(inputs)], dim=0) if kwargs else []
if len(inputs) < len(kwargs):
inputs.extend([() for _ in range(len(kwargs) - len(inputs))])
elif len(kwargs) < len(inputs):
kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))])
outputs = self.parallel_apply(replicas, inputs, kwargs)
return outputs
def forward(self, image):
"""Mult-size Evaluation"""
# only single image is supported for evaluation
batch, _, h, w = image.size()
assert(batch == 1)
stride_rate = 2.0/3.0
crop_size = self.crop_size
stride = int(crop_size * stride_rate)
with torch.cuda.device_of(image):
scores = image.new().resize_(batch,self.nclass,h,w).zero_().cuda()
for scale in self.scales:
long_size = int(math.ceil(self.base_size * scale))
if h > w:
height = long_size
width = int(1.0 * w * long_size / h + 0.5)
short_size = width
else:
width = long_size
height = int(1.0 * h * long_size / w + 0.5)
short_size = height
# resize image to current size
cur_img = resize_image(image, height, width, **self.module._up_kwargs)
if long_size <= crop_size:
pad_img = pad_image(cur_img, self.module.mean,
self.module.std, crop_size)
outputs = module_inference(self.module, pad_img, self.flip)
outputs = crop_image(outputs, 0, height, 0, width)
else:
if short_size < crop_size:
# pad if needed
pad_img = pad_image(cur_img, self.module.mean,
self.module.std, crop_size)
else:
pad_img = cur_img
_,_,ph,pw = pad_img.size()
assert(ph >= height and pw >= width)
# grid forward and normalize
h_grids = int(math.ceil(1.0*(ph-crop_size)/stride)) + 1
w_grids = int(math.ceil(1.0*(pw-crop_size)/stride)) + 1
with torch.cuda.device_of(image):
outputs = image.new().resize_(batch,self.nclass,ph,pw).zero_().cuda()
count_norm = image.new().resize_(batch,1,ph,pw).zero_().cuda()
# grid evaluation
for idh in range(h_grids):
for idw in range(w_grids):
h0 = idh * stride
w0 = idw * stride
h1 = min(h0 + crop_size, ph)
w1 = min(w0 + crop_size, pw)
crop_img = crop_image(pad_img, h0, h1, w0, w1)
# pad if needed
pad_crop_img = pad_image(crop_img, self.module.mean,
self.module.std, crop_size)
output = module_inference(self.module, pad_crop_img, self.flip)
outputs[:,:,h0:h1,w0:w1] += crop_image(output,
0, h1-h0, 0, w1-w0)
count_norm[:,:,h0:h1,w0:w1] += 1
assert((count_norm==0).sum()==0)
outputs = outputs / count_norm
outputs = outputs[:,:,:height,:width]
score = resize_image(outputs, h, w, **self.module._up_kwargs)
scores += score
return scores
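# Usage sketch (illustrative): wrap a trained BaseNet subclass and sum its
# multi-scale, flipped predictions for a single image:
#   evaluator = MultiEvalModule(model, nclass=model.nclass, scales=[0.75, 1.0, 1.25])
#   scores = evaluator(image)        # (1, nclass, H, W), summed over scales
#   pred = scores.max(1)[1]          # per-pixel class indices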
def module_inference(module, image, flip=True):
output = module.evaluate(image)
if flip:
fimg = flip_image(image)
foutput = module.evaluate(fimg)
output += flip_image(foutput)
return output.exp()
def resize_image(img, h, w, **up_kwargs):
return F.upsample(img, (h, w), **up_kwargs)
def pad_image(img, mean, std, crop_size):
b,c,h,w = img.size()
assert(c==3)
padh = crop_size - h if h < crop_size else 0
padw = crop_size - w if w < crop_size else 0
pad_values = -np.array(mean) / np.array(std)
img_pad = img.new().resize_(b,c,h+padh,w+padw)
for i in range(c):
        # note that pytorch pad params are in reversed order
img_pad[:,i,:,:] = F.pad(img[:,i,:,:], (0, padw, 0, padh), value=pad_values[i])
assert(img_pad.size(2)>=crop_size and img_pad.size(3)>=crop_size)
return img_pad
def crop_image(img, h0, h1, w0, w1):
return img[:,:,h0:h1,w0:w1]
def flip_image(img):
assert(img.dim()==4)
with torch.cuda.device_of(img):
idx = torch.arange(img.size(3)-1, -1, -1).type_as(img).long()
return img.index_select(3, idx)
| [
"torch.nn.functional.upsample",
"torch.cuda.device_of",
"torch.nn.parallel.scatter_gather.scatter",
"torch.nn.functional.pad"
] | 0.4.0 | youansheng/PyTorch-Encoding | dc501d28d478dbf668186f721c8600387c32859c |
0.4 | from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CppExtension
setup(
name='enclib_cpu',
ext_modules=[
CppExtension('enclib_cpu', [
'roi_align.cpp',
'roi_align_cpu.cpp',
]),
],
cmdclass={
'build_ext': BuildExtension
})
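# Build sketch: compile the extension in place with
#   python setup.py build_ext --inplace
# or install it into the current environment with
#   pip install .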
| [
"torch.utils.cpp_extension.CppExtension"
] | 0.4.0 | youansheng/PyTorch-Encoding | dc501d28d478dbf668186f721c8600387c32859c |
1.8 | import os
import argparse
import json
import yaml
import torch
from torch.optim.lr_scheduler import ReduceLROnPlateau
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping
from asteroid.models import ConvTasNet
from asteroid.engine.optimizers import make_optimizer
from asteroid.engine.system_kinect_wsj import System
from asteroid.losses import PITLossWrapper, pairwise_neg_sisdr
from asteroid.data.kinect_wsj import make_dataloaders
# Keys which are not in the conf.yml file can be added here.
# In the hierarchical dictionary created when parsing, the key `key` can be
# found at dic['main_args'][key]
# By default train.py will use all available GPUs. The `id` option in run.sh
# will limit the number of available GPUs for train.py.
parser = argparse.ArgumentParser()
parser.add_argument("--exp_dir",
default="exp/tmp",
help="Full path to save best validation model")
def main(conf):
exp_dir = conf["main_args"]["exp_dir"]
# Define Dataloader
train_loader, val_loader = make_dataloaders(**conf["data"],
**conf["training"])
conf["masknet"].update({"n_src": conf["data"]["n_src"]})
model = ConvTasNet(**conf["filterbank"],
**conf["masknet"],
sample_rate=conf["data"]["sample_rate"])
optimizer = make_optimizer(model.parameters(), **conf["optim"])
# Define scheduler
scheduler = None
if conf["training"]["half_lr"]:
scheduler = ReduceLROnPlateau(optimizer=optimizer,
factor=0.5,
patience=5)
# Just after instantiating, save the args. Easy loading in the future.
os.makedirs(exp_dir, exist_ok=True)
conf_path = os.path.join(exp_dir, "conf.yml")
with open(conf_path, "w") as outfile:
yaml.safe_dump(conf, outfile)
# Define Loss function.
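    # Permutation-invariant training: pairwise negative SI-SDR is computed for
    # every (estimate, source) pair and the wrapper selects the best permutation.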
loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from="pw_mtx")
system = System(
model=model,
loss_func=loss_func,
optimizer=optimizer,
train_loader=train_loader,
val_loader=val_loader,
scheduler=scheduler,
config=conf,
)
# Define callbacks
callbacks = []
checkpoint_dir = os.path.join(exp_dir, "checkpoints/")
checkpoint = ModelCheckpoint(checkpoint_dir,
monitor="val_loss",
mode="min",
save_top_k=5,
verbose=True)
callbacks.append(checkpoint)
if conf["training"]["early_stop"]:
callbacks.append(
EarlyStopping(monitor="val_loss",
mode="min",
patience=30,
verbose=True))
# Don't ask GPU if they are not available.
gpus = -1 if torch.cuda.is_available() else None
distributed_backend = "ddp" if torch.cuda.is_available() else None
trainer = pl.Trainer(
max_epochs=conf["training"]["epochs"],
callbacks=callbacks,
default_root_dir=exp_dir,
gpus=gpus,
distributed_backend=distributed_backend,
limit_train_batches=1.0, # Useful for fast experiment
gradient_clip_val=5.0,
)
trainer.fit(system)
best_k = {k: v.item() for k, v in checkpoint.best_k_models.items()}
with open(os.path.join(exp_dir, "best_k_models.json"), "w") as f:
json.dump(best_k, f, indent=0)
state_dict = torch.load(checkpoint.best_model_path)
system.load_state_dict(state_dict=state_dict["state_dict"])
system.cpu()
to_save = system.model.serialize()
#to_save.update(train_set.get_infos())
torch.save(to_save, os.path.join(exp_dir, "best_model.pth"))
if __name__ == "__main__":
import yaml
from pprint import pprint
from asteroid.utils import prepare_parser_from_dict, parse_args_as_dict
# We start with opening the config file conf.yml as a dictionary from
# which we can create parsers. Each top level key in the dictionary defined
# by the YAML file creates a group in the parser.
with open("local/conf.yml") as f:
def_conf = yaml.safe_load(f)
parser = prepare_parser_from_dict(def_conf, parser=parser)
# Arguments are then parsed into a hierarchical dictionary (instead of
# flat, as returned by argparse) to facilitate calls to the different
# asteroid methods (see in main).
# plain_args is the direct output of parser.parse_args() and contains all
# the attributes in an non-hierarchical structure. It can be useful to also
# have it so we included it here but it is not used.
arg_dic, plain_args = parse_args_as_dict(parser, return_plain_args=True)
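    # For example (illustrative), arg_dic groups the options by their YAML
    # section, e.g. {"data": {...}, "filterbank": {...}, "masknet": {...},
    # "optim": {...}, "training": {...}, "main_args": {"exp_dir": ...}}.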
pprint(arg_dic)
main(arg_dic)
| [
"torch.cuda.is_available",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.load"
] | 1.8.0 | ccan1995/asteroid | 782e95be17b6c16ed2b292d11b9063bf274ca346 |
1.9 | import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision.datasets as dsets
# datasets
train_dataset = dsets.MNIST(root='../../../_data', train=True, transform=transforms.ToTensor(), download=True)
test_dataset = dsets.MNIST(root='../../../_data', train=False, transform=transforms.ToTensor())
# make the dataset iterable with data loaders
batch_size = 100
n_iters = 3000
n_epochs = int(n_iters / (len(train_dataset) / batch_size))
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)
class FeedforwardNeuralNetModel(nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim):
super(FeedforwardNeuralNetModel, self).__init__()
# Linear function
self.fc1 = nn.Linear(input_dim, hidden_dim)
# Non-linearity
self.tanh = nn.Tanh()
# Linear function (readout)
self.fc2 = nn.Linear(hidden_dim, output_dim)
def forward(self, x):
# Linear function
out = self.fc1(x)
# Non-linearity
out = self.tanh(out)
# Linear function (readout)
out = self.fc2(out)
return out
# define network
learning_rate = 0.1
input_dim = 28*28
hidden_dim = 100
output_dim = 10
network = FeedforwardNeuralNetModel(input_dim, hidden_dim, output_dim)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(network.parameters(), lr=learning_rate)
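# Illustrative shape check (not part of the original script): a batch of
# flattened 28x28 images should map to 10 class logits.
assert network(torch.randn(4, 28 * 28)).shape == (4, 10)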
# train network
for epoch in range(n_epochs):
for i, (images, labels) in enumerate(train_loader):
# Load images with gradient accumulation capabilities
images = images.view(-1, 28*28).requires_grad_()
# Clear gradients w.r.t. parameters
optimizer.zero_grad()
# Forward pass to get output/logits
outputs = network(images)
# Calculate Loss: softmax --> cross entropy loss
loss = criterion(outputs, labels)
# Getting gradients w.r.t. parameters
loss.backward()
# Updating parameters
optimizer.step()
        if (i + 1) % 500 == 0:
# Calculate Accuracy
correct = 0
total = 0
# Iterate through test dataset
for images, labels in test_loader:
# Load images with gradient accumulation capabilities
images = images.view(-1, 28*28).requires_grad_()
# Forward pass only to get logits/output
outputs = network(images)
# Get predictions from the maximum value
_, predicted = torch.max(outputs.data, 1)
# Total number of labels
total += labels.size(0)
# Total correct predictions
correct += (predicted == labels).sum()
accuracy = 100 * correct / total
# Print Loss
print(f'[{epoch}/{n_epochs}] Loss: {loss.item()}. Accuracy: {accuracy}') | [
"torch.nn.Linear",
"torch.max",
"torch.nn.Tanh",
"torch.utils.data.DataLoader",
"torch.nn.CrossEntropyLoss"
] | 1.9.0 | niektuytel/Machine_Learning | 0cd5656ca8076c383fd81c5e32a49969a20ad042 |
0.4 | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch.nn.functional as F
from . import FairseqCriterion, register_criterion
@register_criterion('adaptive_loss')
class AdaptiveLoss(FairseqCriterion):
"""This is an implementation of the loss function accompanying the adaptive softmax approximation for
graphical processing units (GPU), described in the paper "Efficient softmax approximation for GPUs"
(http://arxiv.org/abs/1609.04309)."""
def __init__(self, args, target_vocabulary):
super().__init__(args, target_vocabulary)
def forward(self, model, sample, reduction='sum'):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
assert hasattr(model.decoder, 'adaptive_softmax') and model.decoder.adaptive_softmax is not None
adaptive_softmax = model.decoder.adaptive_softmax
net_output = model(*sample['net_input'])
target = model.get_targets(sample).view(-1)
bsz = target.size(0)
logits, target = adaptive_softmax(net_output[0], target)
assert len(target) == len(logits)
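        # The adaptive softmax splits the vocabulary into a head and several tail
        # clusters and returns one (logits, target) pair per cluster; the total
        # loss is the sum of the per-cluster cross-entropies, ignoring padding.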
loss = net_output[0].new(1 if reduction == 'sum' else bsz).zero_()
for i in range(len(target)):
if target[i] is not None:
assert (target[i].min() >= 0 and target[i].max() <= logits[i].size(1))
loss += F.cross_entropy(logits[i], target[i], ignore_index=self.padding_idx,
reduction=reduction)
sample_size = sample['target'].size(0) if self.args.batch_by_sentence else sample['ntokens']
logging_output = {
'loss': loss.data.item() if reduction != 'none' else loss.data,
'ntokens': sample['ntokens'],
'sample_size': sample_size,
}
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get('loss', 0) for log in logging_outputs)
ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
agg_output = {
'loss': loss_sum / sample_size,
'sample_size': sample_size,
}
if sample_size != ntokens:
# agg_output['nll_loss'] = loss_sum / ntokens / math.log(2)
agg_output['per_word_loss'] = loss_sum / ntokens
else:
agg_output['per_word_loss'] = agg_output['loss']
return agg_output
| [
"torch.nn.functional.cross_entropy"
] | 0.4.1 | DeepLearnXMU/ABDNMT-RNMT | c3b20e4afdbfee5741e95a42bbd31329bb9bb93d |
1.10 | import os
import json
import sys
import time
import torch
from torch import nn
from torch import optim
import torchvision as tv
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader
from PIL import Image
import numpy as np
EPS = 1e-6
ALPHA_RECONSTRUCT_IMAGE = 1
ALPHA_RECONSTRUCT_LATENT = 0.5
ALPHA_DISCRIMINATE_IMAGE = 0.005
ALPHA_DISCRIMINATE_LATENT = 0.1
class Generator(nn.Module):
"""A generator for mapping a latent space to a sample space.
Input shape: (?, latent_dim)
Output shape: (?, 3, 96, 96)
"""
def __init__(self, latent_dim: int = 16):
"""Initialize generator.
Args:
latent_dim (int): latent dimension ("noise vector")
"""
super().__init__()
self.latent_dim = latent_dim
self._init_modules()
def build_colourspace(self, input_dim: int, output_dim: int):
"""Build a small module for selecting colours."""
colourspace = nn.Sequential(
nn.Linear(
input_dim,
128,
bias=True),
nn.BatchNorm1d(128),
nn.LeakyReLU(),
nn.Linear(
128,
64,
bias=True),
nn.BatchNorm1d(64),
nn.LeakyReLU(),
nn.Linear(
64,
output_dim,
bias=True),
nn.Tanh(),
)
return colourspace
def _init_modules(self):
"""Initialize the modules."""
projection_widths = [8, 8, 8, 8, 8, 8, 8]
self.projection_dim = sum(projection_widths) + self.latent_dim
self.projection = nn.ModuleList()
for index, i in enumerate(projection_widths):
self.projection.append(
nn.Sequential(
nn.Linear(
self.latent_dim + sum(projection_widths[:index]),
i,
bias=True,
),
nn.BatchNorm1d(8),
nn.LeakyReLU(),
)
)
self.projection_upscaler = nn.Upsample(scale_factor=3)
self.colourspace_r = self.build_colourspace(self.projection_dim, 16)
self.colourspace_g = self.build_colourspace(self.projection_dim, 16)
self.colourspace_b = self.build_colourspace(self.projection_dim, 16)
self.colourspace_upscaler = nn.Upsample(scale_factor=96)
self.seed = nn.Sequential(
nn.Linear(
self.projection_dim,
512*3*3,
bias=True),
nn.BatchNorm1d(512*3*3),
nn.LeakyReLU(),
)
self.upscaling = nn.ModuleList()
self.conv = nn.ModuleList()
self.upscaling.append(nn.Upsample(scale_factor=2))
self.conv.append(nn.Sequential(
nn.ZeroPad2d((1, 1, 1, 1)),
nn.Conv2d(
in_channels=(512)//4,
out_channels=512,
kernel_size=3,
stride=1,
padding=0,
bias=True
),
nn.BatchNorm2d(512),
nn.LeakyReLU(),
))
self.upscaling.append(nn.Upsample(scale_factor=2))
self.conv.append(nn.Sequential(
nn.ZeroPad2d((1, 2, 1, 2)),
nn.Conv2d(
in_channels=(512 + self.projection_dim)//4,
out_channels=256,
kernel_size=4,
stride=1,
padding=0,
bias=True
),
nn.BatchNorm2d(256),
nn.LeakyReLU(),
))
self.upscaling.append(nn.Upsample(scale_factor=2))
self.conv.append(nn.Sequential(
nn.ZeroPad2d((1, 2, 1, 2)),
nn.Conv2d(
in_channels=(256 + self.projection_dim)//4,
out_channels=256,
kernel_size=4,
stride=1,
padding=0,
bias=True
),
nn.BatchNorm2d(256),
nn.LeakyReLU(),
))
self.upscaling.append(nn.Upsample(scale_factor=2))
self.conv.append(nn.Sequential(
nn.ZeroPad2d((1, 2, 1, 2)),
nn.Conv2d(
in_channels=(256 + self.projection_dim)//4,
out_channels=256,
kernel_size=4,
stride=1,
padding=0,
bias=True
),
nn.BatchNorm2d(256),
nn.LeakyReLU(),
        ))
self.upscaling.append(nn.Upsample(scale_factor=2))
self.conv.append(nn.Sequential(
nn.ZeroPad2d((1, 2, 1, 2)),
nn.Conv2d(
in_channels=(256 + self.projection_dim)//4,
out_channels=64,
kernel_size=4,
stride=1,
padding=0,
bias=True
),
nn.BatchNorm2d(64),
nn.LeakyReLU(),
))
self.upscaling.append(nn.Upsample(scale_factor=1))
self.conv.append(nn.Sequential(
nn.ZeroPad2d((2, 2, 2, 2)),
nn.Conv2d(
in_channels=64,
out_channels=16,
kernel_size=5,
stride=1,
padding=0,
bias=True
),
nn.Softmax(dim=1),
))
def forward(self, input_tensor):
"""Forward pass; map latent vectors to samples."""
last = input_tensor
for module in self.projection:
projection = module(last)
last = torch.cat((last, projection), -1)
projection = last
intermediate = self.seed(projection)
intermediate = intermediate.view((-1, 512, 3, 3))
projection_2d = projection.view((-1, self.projection_dim, 1, 1))
projection_2d = self.projection_upscaler(projection_2d)
for i, (conv, upscaling) in enumerate(zip(self.conv, self.upscaling)):
if i + 1 != len(self.upscaling):
if i > 0:
intermediate = torch.cat((intermediate, projection_2d), 1)
intermediate = torch.nn.functional.pixel_shuffle(intermediate, 2)
intermediate = conv(intermediate)
projection_2d = upscaling(projection_2d)
r_space = self.colourspace_r(projection)
r_space = r_space.view((-1, 16, 1, 1))
r_space = self.colourspace_upscaler(r_space)
r_space = intermediate * r_space
r_space = torch.sum(r_space, dim=1, keepdim=True)
g_space = self.colourspace_g(projection)
g_space = g_space.view((-1, 16, 1, 1))
g_space = self.colourspace_upscaler(g_space)
g_space = intermediate * g_space
g_space = torch.sum(g_space, dim=1, keepdim=True)
b_space = self.colourspace_b(projection)
b_space = b_space.view((-1, 16, 1, 1))
b_space = self.colourspace_upscaler(b_space)
b_space = intermediate * b_space
b_space = torch.sum(b_space, dim=1, keepdim=True)
output = torch.cat((r_space, g_space, b_space), dim=1)
return output
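# Shape sketch (illustrative): with the default latent_dim=16, a batch of latent
# vectors maps to 3x96x96 images with values in [-1, 1], e.g.
#   Generator(latent_dim=16)(torch.randn(8, 16)).shape == (8, 3, 96, 96)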
class Encoder(nn.Module):
"""An Encoder for encoding images as latent vectors.
Input shape: (?, 3, 96, 96)
Output shape: (?, latent_dim)
"""
def __init__(self, device: str = "cpu", latent_dim: int = 8):
"""Initialize encoder.
Args:
            device: which GPU or CPU to use.
latent_dim: output dimension
"""
super().__init__()
self.device = device
self.latent_dim = latent_dim
self._init_modules()
def _init_modules(self):
"""Initialize the modules."""
down_channels = [3, 64, 128, 256, 512]
self.down = nn.ModuleList()
for i in range(len(down_channels)-1):
self.down.append(
nn.Sequential(
nn.Conv2d(
in_channels=down_channels[i],
out_channels=down_channels[i+1],
kernel_size=3,
stride=2,
padding=1,
bias=True,
),
nn.BatchNorm2d(down_channels[i+1]),
nn.LeakyReLU(),
)
)
self.reducer = nn.Sequential(
nn.Conv2d(
in_channels=down_channels[-1],
out_channels=down_channels[-2],
kernel_size=3,
stride=1,
padding=1,
bias=True,
),
nn.BatchNorm2d(down_channels[-2]),
nn.LeakyReLU(),
nn.Upsample(scale_factor=2)
)
up_channels = [256, 128, 64, 64, 64]
scale_factors = [2, 2, 2, 1]
self.up = nn.ModuleList()
for i in range(len(up_channels)-1):
self.up.append(
nn.Sequential(
nn.Conv2d(
in_channels=up_channels[i] + down_channels[-2-i],
out_channels=up_channels[i+1],
kernel_size=3,
stride=1,
padding=1,
bias=True,
),
nn.BatchNorm2d(up_channels[i+1]),
nn.LeakyReLU(),
nn.Upsample(scale_factor=scale_factors[i]),
)
)
down_again_channels = [64+3, 64, 64, 64, 64]
self.down_again = nn.ModuleList()
for i in range(len(down_again_channels)-1):
self.down_again.append(
nn.Conv2d(
in_channels=down_again_channels[i],
out_channels=down_again_channels[i+1],
kernel_size=3,
stride=2,
padding=1,
bias=True,
)
)
self.down_again.append(nn.BatchNorm2d(down_again_channels[i+1]))
self.down_again.append(nn.LeakyReLU())
self.projection = nn.Sequential(
nn.Linear(
512*6*6 + 64*6*6,
256,
bias=True,
),
nn.BatchNorm1d(256),
nn.LeakyReLU(),
nn.Linear(
256,
128,
bias=True,
),
nn.BatchNorm1d(128),
nn.LeakyReLU(),
nn.Linear(
128,
self.latent_dim,
bias=True,
),
)
def forward(self, input_tensor):
"""Forward pass; map latent vectors to samples."""
rv = torch.randn(input_tensor.size(), device=self.device) * 0.02
augmented_input = input_tensor + rv
intermediate = augmented_input
intermediates = [augmented_input]
for module in self.down:
intermediate = module(intermediate)
intermediates.append(intermediate)
intermediates = intermediates[:-1][::-1]
down = intermediate.view(-1, 6*6*512)
intermediate = self.reducer(intermediate)
for index, module in enumerate(self.up):
intermediate = torch.cat((intermediate, intermediates[index]), 1)
intermediate = module(intermediate)
intermediate = torch.cat((intermediate, input_tensor), 1)
for module in self.down_again:
intermediate = module(intermediate)
intermediate = intermediate.view(-1, 6*6*64)
intermediate = torch.cat((down, intermediate), -1)
projected = self.projection(intermediate)
return projected
class DiscriminatorImage(nn.Module):
"""A discriminator for discerning real from generated images.
Input shape: (?, 3, 96, 96)
Output shape: (?, 1)
"""
def __init__(self, device="cpu"):
"""Initialize the discriminator."""
super().__init__()
self.device = device
self._init_modules()
def _init_modules(self):
"""Initialize the modules."""
down_channels = [3, 64, 128, 256, 512]
self.down = nn.ModuleList()
leaky_relu = nn.LeakyReLU()
for i in range(4):
self.down.append(
nn.Conv2d(
in_channels=down_channels[i],
out_channels=down_channels[i+1],
kernel_size=3,
stride=2,
padding=1,
bias=True,
)
)
self.down.append(nn.BatchNorm2d(down_channels[i+1]))
self.down.append(leaky_relu)
self.classifier = nn.ModuleList()
self.width = down_channels[-1] * 6**2
self.classifier.append(nn.Linear(self.width, 1))
self.classifier.append(nn.Sigmoid())
def forward(self, input_tensor):
"""Forward pass; map latent vectors to samples."""
rv = torch.randn(input_tensor.size(), device=self.device) * 0.02
intermediate = input_tensor + rv
for module in self.down:
intermediate = module(intermediate)
rv = torch.randn(intermediate.size(), device=self.device) * 0.02 + 1
intermediate *= rv
intermediate = intermediate.view(-1, self.width)
for module in self.classifier:
intermediate = module(intermediate)
return intermediate
class DiscriminatorLatent(nn.Module):
"""A discriminator for discerning real from generated vectors.
Input shape: (?, latent_dim)
Output shape: (?, 1)
"""
def __init__(self, latent_dim=8, device="cpu"):
"""Initialize the Discriminator."""
super().__init__()
self.latent_dim = latent_dim
self.device = device
self._init_modules()
def _init_modules(self, depth=7, width=8):
"""Initialize the modules."""
self.pyramid = nn.ModuleList()
for i in range(depth):
self.pyramid.append(
nn.Sequential(
nn.Linear(
self.latent_dim + width*i,
width,
bias=True,
),
nn.BatchNorm1d(width),
nn.LeakyReLU(),
)
)
self.classifier = nn.ModuleList()
self.classifier.append(nn.Linear(depth*width + self.latent_dim, 1))
self.classifier.append(nn.Sigmoid())
def forward(self, input_tensor):
"""Forward pass; map latent vectors to samples."""
last = input_tensor
for module in self.pyramid:
projection = module(last)
rv = torch.randn(projection.size(), device=self.device) * 0.02 + 1
projection *= rv
last = torch.cat((last, projection), -1)
for module in self.classifier:
last = module(last)
return last
class AEGAN():
"""An Autoencoder Generative Adversarial Network for making pokemon."""
def __init__(self, latent_dim, noise_fn, dataloader,
batch_size=32, device='cpu'):
"""Initialize the AEGAN.
Args:
latent_dim: latent-space dimension. Must be divisible by 4.
noise_fn: function f(num: int) -> pytorch tensor, (latent vectors)
dataloader: a pytorch dataloader for loading images
batch_size: training batch size. Must match that of dataloader
device: cpu or CUDA
"""
assert latent_dim % 4 == 0
self.latent_dim = latent_dim
self.device = device
self.noise_fn = noise_fn
self.dataloader = dataloader
self.batch_size = batch_size
self.criterion_gen = nn.BCELoss()
self.criterion_recon_image = nn.L1Loss()
self.criterion_recon_latent = nn.MSELoss()
self.target_ones = torch.ones((batch_size, 1), device=device)
self.target_zeros = torch.zeros((batch_size, 1), device=device)
self._init_generator()
self._init_encoder()
self._init_dx()
self._init_dz()
def _init_generator(self):
self.generator = Generator(latent_dim=self.latent_dim)
self.generator = self.generator.to(self.device)
self.optim_g = optim.Adam(self.generator.parameters(),
lr=2e-4, betas=(0.5, 0.999),
weight_decay=1e-8)
def _init_encoder(self):
self.encoder = Encoder(latent_dim=self.latent_dim, device=self.device)
self.encoder = self.encoder.to(self.device)
self.optim_e = optim.Adam(self.encoder.parameters(),
lr=2e-4, betas=(0.5, 0.999),
weight_decay=1e-8)
def _init_dx(self):
self.discriminator_image = DiscriminatorImage(device=self.device).to(self.device)
self.optim_di = optim.Adam(self.discriminator_image.parameters(),
lr=1e-4, betas=(0.5, 0.999),
weight_decay=1e-8)
def _init_dz(self):
self.discriminator_latent = DiscriminatorLatent(
latent_dim=self.latent_dim,
device=self.device,
).to(self.device)
self.optim_dl = optim.Adam(self.discriminator_latent.parameters(),
lr=1e-4, betas=(0.5, 0.999),
weight_decay=1e-8)
def generate_samples(self, latent_vec=None, num=None):
"""Sample images from the generator.
Images are returned as a 4D tensor of values between -1 and 1.
Dimensions are (number, channels, height, width). Returns the tensor
on cpu.
Args:
latent_vec: A pytorch latent vector or None
num: The number of samples to generate if latent_vec is None
If latent_vec and num are None then use self.batch_size
random latent vectors.
"""
num = self.batch_size if num is None else num
latent_vec = self.noise_fn(num) if latent_vec is None else latent_vec
with torch.no_grad():
samples = self.generator(latent_vec)
samples = samples.cpu() # move images to cpu
return samples
def train_step_generators(self, X):
"""Train the generator one step and return the loss."""
self.generator.zero_grad()
self.encoder.zero_grad()
Z = self.noise_fn(self.batch_size)
X_hat = self.generator(Z)
Z_hat = self.encoder(X)
X_tilde = self.generator(Z_hat)
Z_tilde = self.encoder(X_hat)
X_hat_confidence = self.discriminator_image(X_hat)
Z_hat_confidence = self.discriminator_latent(Z_hat)
X_tilde_confidence = self.discriminator_image(X_tilde)
Z_tilde_confidence = self.discriminator_latent(Z_tilde)
X_hat_loss = self.criterion_gen(X_hat_confidence, self.target_ones)
Z_hat_loss = self.criterion_gen(Z_hat_confidence, self.target_ones)
X_tilde_loss = self.criterion_gen(X_tilde_confidence, self.target_ones)
Z_tilde_loss = self.criterion_gen(Z_tilde_confidence, self.target_ones)
X_recon_loss = self.criterion_recon_image(X_tilde, X) * ALPHA_RECONSTRUCT_IMAGE
Z_recon_loss = self.criterion_recon_latent(Z_tilde, Z) * ALPHA_RECONSTRUCT_LATENT
X_loss = (X_hat_loss + X_tilde_loss) / 2 * ALPHA_DISCRIMINATE_IMAGE
Z_loss = (Z_hat_loss + Z_tilde_loss) / 2 * ALPHA_DISCRIMINATE_LATENT
loss = X_loss + Z_loss + X_recon_loss + Z_recon_loss
loss.backward()
self.optim_e.step()
self.optim_g.step()
return X_loss.item(), Z_loss.item(), X_recon_loss.item(), Z_recon_loss.item()
def train_step_discriminators(self, X):
"""Train the discriminator one step and return the losses."""
self.discriminator_image.zero_grad()
self.discriminator_latent.zero_grad()
Z = self.noise_fn(self.batch_size)
with torch.no_grad():
X_hat = self.generator(Z)
Z_hat = self.encoder(X)
X_tilde = self.generator(Z_hat)
Z_tilde = self.encoder(X_hat)
X_confidence = self.discriminator_image(X)
X_hat_confidence = self.discriminator_image(X_hat)
X_tilde_confidence = self.discriminator_image(X_tilde)
Z_confidence = self.discriminator_latent(Z)
Z_hat_confidence = self.discriminator_latent(Z_hat)
Z_tilde_confidence = self.discriminator_latent(Z_tilde)
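        # Real samples are weighted 2x because two fake batches (generated and
        # reconstructed) are scored per real batch; dividing by 4 below keeps the
        # real and fake contributions balanced.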
X_loss = 2 * self.criterion_gen(X_confidence, self.target_ones)
X_hat_loss = self.criterion_gen(X_hat_confidence, self.target_zeros)
X_tilde_loss = self.criterion_gen(X_tilde_confidence, self.target_zeros)
Z_loss = 2 * self.criterion_gen(Z_confidence, self.target_ones)
Z_hat_loss = self.criterion_gen(Z_hat_confidence, self.target_zeros)
Z_tilde_loss = self.criterion_gen(Z_tilde_confidence, self.target_zeros)
loss_images = (X_loss + X_hat_loss + X_tilde_loss) / 4
loss_latent = (Z_loss + Z_hat_loss + Z_tilde_loss) / 4
loss = loss_images + loss_latent
loss.backward()
self.optim_di.step()
self.optim_dl.step()
return loss_images.item(), loss_latent.item()
def train_epoch(self, print_frequency=1, max_steps=0):
"""Train both networks for one epoch and return the losses.
Args:
print_frequency (int): print stats every `print_frequency` steps.
max_steps (int): End epoch after `max_steps` steps, or set to 0
to do the full epoch.
"""
ldx, ldz, lgx, lgz, lrx, lrz = 0, 0, 0, 0, 0, 0
eps = 1e-9
for batch, (real_samples, _) in enumerate(self.dataloader):
real_samples = real_samples.to(self.device)
ldx_, ldz_ = self.train_step_discriminators(real_samples)
ldx += ldx_
ldz += ldz_
lgx_, lgz_, lrx_, lrz_ = self.train_step_generators(real_samples)
lgx += lgx_
lgz += lgz_
lrx += lrx_
lrz += lrz_
if print_frequency and (batch+1) % print_frequency == 0:
print(f"{batch+1}/{len(self.dataloader)}:"
f" G={lgx / (eps + (batch+1) * ALPHA_DISCRIMINATE_IMAGE):.3f},"
f" E={lgz / (eps + (batch+1) * ALPHA_DISCRIMINATE_LATENT):.3f},"
f" Dx={ldx / (eps + (batch+1)):.3f},"
f" Dz={ldz / (eps + (batch+1)):.3f}",
f" Rx={lrx / (eps + (batch+1) * ALPHA_RECONSTRUCT_IMAGE):.3f}",
f" Rz={lrz / (eps + (batch+1) * ALPHA_RECONSTRUCT_LATENT):.3f}",
end='\r',
flush=True)
if max_steps and batch == max_steps:
break
if print_frequency:
print()
lgx /= batch
lgz /= batch
ldx /= batch
ldz /= batch
lrx /= batch
lrz /= batch
return lgx, lgz, ldx, ldz, lrx, lrz
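# Training-loop sketch (assumption; no driver script is included in this file):
#   device = "cuda" if torch.cuda.is_available() else "cpu"
#   noise_fn = lambda n: torch.randn((n, 16), device=device)
#   gan = AEGAN(16, noise_fn, dataloader, batch_size=32, device=device)
#   for epoch in range(num_epochs):
#       gan.train_epoch()
#       samples = gan.generate_samples(num=32)   # (32, 3, 96, 96) on cpu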
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.LeakyReLU",
"torch.nn.BatchNorm2d",
"torch.ones",
"torch.sum",
"torch.nn.Softmax",
"torch.nn.functional.pixel_shuffle",
"torch.nn.BCELoss",
"torch.zeros",
"torch.nn.Tanh",
"torch.nn.Conv2d",
"torch.nn.MSELoss",
"torch.nn.Sigmoid",
"torch.no_grad",
"torch.nn.L1Loss",
"torch.nn.Upsample",
"torch.nn.BatchNorm1d",
"torch.nn.ZeroPad2d"
] | 1.10.0 | godomainz/PokeGAN | 253c267a89e7545524bcd0db3859fdfa801e303e |
1.4 | import util
import numpy as np
import random
from transformers import BertTokenizer, AutoTokenizer
import os
from os.path import join
import json
import pickle
import logging
import torch
import itertools
logger = logging.getLogger(__name__)
class CorefDataProcessor:
def __init__(self, config, language='english'):
self.config = config
self.language = language
self.max_seg_len = config['max_segment_len']
self.max_training_seg = config['max_training_sentences']
self.data_dir = config['data_dir']
# Get tensorized samples
cache_path = self.get_cache_path()
if os.path.exists(cache_path):
# Load cached tensors if exists
with open(cache_path, 'rb') as f:
self.tensor_samples, self.stored_info = pickle.load(f)
logger.info('Loaded tensorized examples from cache')
else:
# Generate tensorized samples
self.tensor_samples = {}
tensorizer = Tensorizer(self.config)
paths = {
'trn': join(self.data_dir, f'{language}-train.{self.max_seg_len}.jsonlines'),
'dev': join(self.data_dir, f'{language}-dev.{self.max_seg_len}.jsonlines'),
# 'tst': join(self.data_dir, f'{language}-test.{self.max_seg_len}.jsonlines')
}
for split, path in paths.items():
logger.info('Tensorizing examples from %s; results will be cached)' % path)
is_training = (split == 'trn')
with open(path, 'r') as f:
samples = [json.loads(line) for line in f.readlines()]
print(util.count_singletons(samples))
tensor_samples = [tensorizer.tensorize_example(sample, is_training) for sample in samples]
self.tensor_samples[split] = [(doc_key, self.convert_to_torch_tensor(*tensor)) for doc_key, tensor in tensor_samples]
self.stored_info = tensorizer.stored_info
# Cache tensorized samples
with open(cache_path, 'wb') as f:
pickle.dump((self.tensor_samples, self.stored_info), f)
@classmethod
def convert_to_torch_tensor(cls, input_ids, input_mask, speaker_ids, sentence_len, genre, sentence_map,
is_training, gold_starts, gold_ends, gold_mention_cluster_map):
input_ids = torch.tensor(input_ids, dtype=torch.long)
input_mask = torch.tensor(input_mask, dtype=torch.long)
speaker_ids = torch.tensor(speaker_ids, dtype=torch.long)
sentence_len = torch.tensor(sentence_len, dtype=torch.long)
genre = torch.tensor(genre, dtype=torch.long)
sentence_map = torch.tensor(sentence_map, dtype=torch.long)
is_training = torch.tensor(is_training, dtype=torch.bool)
gold_starts = torch.tensor(gold_starts, dtype=torch.long)
gold_ends = torch.tensor(gold_ends, dtype=torch.long)
gold_mention_cluster_map = torch.tensor(gold_mention_cluster_map, dtype=torch.long)
return input_ids, input_mask, speaker_ids, sentence_len, genre, sentence_map, \
is_training, gold_starts, gold_ends, gold_mention_cluster_map,
def get_tensor_examples(self):
# For each split, return list of tensorized samples to allow variable length input (batch size = 1)
return self.tensor_samples['trn'], self.tensor_samples['dev']
def get_stored_info(self):
return self.stored_info
def get_cache_path(self):
cache_path = join(self.data_dir, f'cached.tensors.{self.language}.{self.max_seg_len}.{self.max_training_seg}.bin')
return cache_path
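# Usage sketch (illustrative): build the processor once per config/language and
# iterate over the cached tensorized documents, e.g.
#   processor = CorefDataProcessor(config, language='english')
#   train_examples, dev_examples = processor.get_tensor_examples()
#   doc_key, tensors = train_examples[0]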
class Tensorizer:
def __init__(self, config):
self.config = config
self.tokenizer = AutoTokenizer.from_pretrained(config['bert_tokenizer_name'])
# Will be used in evaluation
self.stored_info = {}
self.stored_info['tokens'] = {} # {doc_key: ...}
self.stored_info['subtoken_maps'] = {} # {doc_key: ...}; mapping back to tokens
self.stored_info['gold'] = {} # {doc_key: ...}
self.stored_info['genre_dict'] = {genre: idx for idx, genre in enumerate(config['genres'])}
def _tensorize_spans(self, spans):
if len(spans) > 0:
starts, ends = zip(*spans)
else:
starts, ends = [], []
return np.array(starts), np.array(ends)
def _tensorize_span_w_labels(self, spans, label_dict):
if len(spans) > 0:
starts, ends, labels = zip(*spans)
else:
starts, ends, labels = [], [], []
return np.array(starts), np.array(ends), np.array([label_dict[label] for label in labels])
def _get_speaker_dict(self, speakers):
speaker_dict = {'UNK': 0, '[SPL]': 1}
for speaker in speakers:
if len(speaker_dict) > self.config['max_num_speakers']:
pass # 'break' to limit # speakers
if speaker not in speaker_dict:
speaker_dict[speaker] = len(speaker_dict)
return speaker_dict
def tensorize_example(self, example, is_training):
# Mentions and clusters
clusters = example['clusters']
gold_mentions = sorted(tuple(mention) for mention in util.flatten(clusters))
gold_mention_map = {mention: idx for idx, mention in enumerate(gold_mentions)}
gold_mention_cluster_map = np.zeros(len(gold_mentions)) # 0: no cluster
for cluster_id, cluster in enumerate(clusters):
for mention in cluster:
gold_mention_cluster_map[gold_mention_map[tuple(mention)]] = cluster_id + 1
# Speakers
speakers = example['speakers']
speaker_dict = self._get_speaker_dict(util.flatten(speakers))
# Sentences/segments
sentences = example['sentences'] # Segments
sentence_map = example['sentence_map']
num_words = sum([len(s) for s in sentences])
max_sentence_len = self.config['max_segment_len']
sentence_len = np.array([len(s) for s in sentences])
# Bert input
input_ids, input_mask, speaker_ids = [], [], []
for idx, (sent_tokens, sent_speakers) in enumerate(zip(sentences, speakers)):
sent_input_ids = self.tokenizer.convert_tokens_to_ids(sent_tokens)
sent_input_mask = [1] * len(sent_input_ids)
sent_speaker_ids = [speaker_dict[speaker] for speaker in sent_speakers]
while len(sent_input_ids) < max_sentence_len:
sent_input_ids.append(0)
sent_input_mask.append(0)
sent_speaker_ids.append(0)
input_ids.append(sent_input_ids)
input_mask.append(sent_input_mask)
speaker_ids.append(sent_speaker_ids)
input_ids = np.array(input_ids)
input_mask = np.array(input_mask)
speaker_ids = np.array(speaker_ids)
assert num_words == np.sum(input_mask), (num_words, np.sum(input_mask))
# Keep info to store
doc_key = example['doc_key']
self.stored_info['subtoken_maps'][doc_key] = example.get('subtoken_map', None)
self.stored_info['gold'][doc_key] = example['clusters']
# self.stored_info['tokens'][doc_key] = example['tokens']
# Construct example
genre = self.stored_info['genre_dict'].get(doc_key[:2], 0)
gold_starts, gold_ends = self._tensorize_spans(gold_mentions)
example_tensor = (input_ids, input_mask, speaker_ids, sentence_len, genre, sentence_map, is_training,
gold_starts, gold_ends, gold_mention_cluster_map)
if is_training and len(sentences) > self.config['max_training_sentences']:
return doc_key, self.truncate_example(*example_tensor)
else:
return doc_key, example_tensor
def truncate_example(self, input_ids, input_mask, speaker_ids, sentence_len, genre, sentence_map, is_training,
gold_starts=None, gold_ends=None, gold_mention_cluster_map=None, sentence_offset=None):
max_sentences = self.config["max_training_sentences"]
num_sentences = input_ids.shape[0]
assert num_sentences > max_sentences
sent_offset = sentence_offset
if sent_offset is None:
sent_offset = random.randint(0, num_sentences - max_sentences)
word_offset = sentence_len[:sent_offset].sum()
num_words = sentence_len[sent_offset: sent_offset + max_sentences].sum()
input_ids = input_ids[sent_offset: sent_offset + max_sentences, :]
input_mask = input_mask[sent_offset: sent_offset + max_sentences, :]
speaker_ids = speaker_ids[sent_offset: sent_offset + max_sentences, :]
sentence_len = sentence_len[sent_offset: sent_offset + max_sentences]
sentence_map = sentence_map[word_offset: word_offset + num_words]
if gold_starts is None:
return input_ids, input_mask, speaker_ids, sentence_len, genre, sentence_map, is_training
gold_spans = (gold_starts < word_offset + num_words) & (gold_ends >= word_offset)
gold_starts = gold_starts[gold_spans] - word_offset
gold_ends = gold_ends[gold_spans] - word_offset
gold_mention_cluster_map = gold_mention_cluster_map[gold_spans]
return input_ids, input_mask, speaker_ids, sentence_len, genre, sentence_map, \
is_training, gold_starts, gold_ends, gold_mention_cluster_map
def split_example(self, input_ids, input_mask, speaker_ids, sentence_len, genre, sentence_map, is_training,
gold_starts=None, gold_ends=None, gold_mention_cluster_map=None, sentence_offset=None):
max_sentences = self.config["max_training_sentences"] if "max_pred_sentences" not in self.config else self.config["max_pred_sentences"]
num_sentences = input_ids.shape[0]
offset = 0
splits = []
while offset < num_sentences:
splits.append(self.truncate_example(input_ids, input_mask, speaker_ids, sentence_len, genre, sentence_map, is_training,
gold_starts, gold_ends, gold_mention_cluster_map, sentence_offset=offset))
offset += max_sentences
return splits | [
"torch.tensor"
] | 1.4.0 | ondfa/coref-multiling | ac4ccf4ddb3187939525c5e7076057e7fdce55a4 |
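As a quick illustration of how the Tensorizer above is meant to be driven, the sketch below feeds it one tiny, hand-made document. The config keys mirror the ones the class actually reads ('bert_tokenizer_name', 'genres', 'max_num_speakers', 'max_segment_len', 'max_training_sentences'); the toy document, its doc_key, and the tokenizer name are assumptions for illustration only and are not taken from the repository's data.

# Minimal driver sketch for the Tensorizer defined above (assumes this module's own
# imports -- transformers, numpy, util -- are available and the tokenizer can be downloaded).
config = {
    'bert_tokenizer_name': 'bert-base-cased',   # assumed tokenizer name, not from the repo config
    'genres': ['bc', 'bn', 'nw'],
    'max_num_speakers': 20,
    'max_segment_len': 16,
    'max_training_sentences': 3,
}
example = {
    'doc_key': 'nw_demo_0',
    'sentences': [['[CLS]', 'John', 'saw', 'Mary', '.', 'He', 'waved', '.', '[SEP]']],
    'sentence_map': [0, 0, 0, 0, 0, 1, 1, 1, 1],
    'speakers': [['[SPL]', 'spk1', 'spk1', 'spk1', 'spk1', 'spk1', 'spk1', 'spk1', '[SPL]']],
    'clusters': [[[1, 1], [5, 5]]],              # "John" and "He" corefer (subtoken indices)
    'subtoken_map': list(range(9)),
}
tensorizer = Tensorizer(config)
doc_key, tensors = tensorizer.tensorize_example(example, is_training=False)
input_ids, input_mask = tensors[0], tensors[1]   # numpy arrays padded to max_segment_len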
1.0 | import torch.nn as nn
from simplecv.interface import ConfigurableMixin
from simplecv.module._hrnet import hrnetv2_w18
from simplecv.module._hrnet import hrnetv2_w32
from simplecv.module._hrnet import hrnetv2_w40
from simplecv.module._hrnet import hrnetv2_w48
from simplecv.module import context_block
from simplecv.module import se_block
from simplecv import registry
from torch.utils import checkpoint as cp
from simplecv.util import logger
_logger = logger.get_logger()
registry.MODEL.register('hrnetv2_w18', hrnetv2_w18)
registry.MODEL.register('hrnetv2_w32', hrnetv2_w32)
registry.MODEL.register('hrnetv2_w40', hrnetv2_w40)
registry.MODEL.register('hrnetv2_w48', hrnetv2_w48)
defalut_config = dict(
hrnet_type='hrnetv2_w18',
pretrained=False,
weight_path=None,
norm_eval=False,
frozen_stages=-1,
with_cp=False
)
@registry.MODEL.register('HRNetEncoder')
class HRNetEncoder(nn.Module, ConfigurableMixin):
def __init__(self, config=defalut_config):
super(HRNetEncoder, self).__init__()
ConfigurableMixin.__init__(self, config)
self.hrnet = registry.MODEL[self.config.hrnet_type](pretrained=self.config.pretrained,
weight_path=self.config.weight_path,
norm_eval=self.config.norm_eval,
frozen_stages=self.config.frozen_stages)
_logger.info('HRNetEncoder: pretrained = {}'.format(self.config.pretrained))
def forward(self, x):
if self.config.with_cp and not self.training:
return cp.checkpoint(self.hrnet, x)
return self.hrnet(x)
# stage 1
@property
def stage1(self):
return self.hrnet.stage1
@stage1.setter
def stage1(self, value):
del self.hrnet.stage1
self.hrnet.stage1 = value
# stage 2
@property
def stage2(self):
return self.hrnet.stage2
@stage2.setter
def stage2(self, value):
del self.hrnet.stage2
self.hrnet.stage2 = value
# stage 3
@property
def stage3(self):
return self.hrnet.stage3
@stage3.setter
def stage3(self, value):
del self.hrnet.stage3
self.hrnet.stage3 = value
# stage 4
@property
def stage4(self):
return self.hrnet.stage4
@stage4.setter
def stage4(self, value):
del self.hrnet.stage4
self.hrnet.stage4 = value
def set_defalut_config(self):
self.config.update(defalut_config)
def output_channels(self):
if self.config.hrnet_type == 'hrnetv2_w18':
return 18, 36, 72, 144
elif self.config.hrnet_type == 'hrnetv2_w32':
return 32, 64, 128, 256
elif self.config.hrnet_type == 'hrnetv2_w40':
return 40, 80, 160, 320
elif self.config.hrnet_type == 'hrnetv2_w48':
return 48, 96, 192, 384
else:
raise NotImplementedError('{} is not implemented.'.format(self.config.hrnet_type))
def with_context_block(self, ratio):
_logger.info('With context block (ratio = {})'.format(ratio))
assert ratio in [1 / 16., 1 / 8.]
self.stage2 = context_block.plugin_to_basicblock(self.stage2, ratio)
self.stage3 = context_block.plugin_to_basicblock(self.stage3, ratio)
self.stage4 = context_block.plugin_to_basicblock(self.stage4, ratio)
def with_squeeze_excitation(self, inv_ratio):
_logger.info('With squeeze_excitation block (inv_ratio = {})'.format(inv_ratio))
assert inv_ratio in [16, 8]
self.stage2 = se_block.plugin_to_basicblock(self.stage2, inv_ratio)
self.stage3 = se_block.plugin_to_basicblock(self.stage3, inv_ratio)
self.stage4 = se_block.plugin_to_basicblock(self.stage4, inv_ratio)
| [
"torch.utils.checkpoint.checkpoint"
] | 1.0 | Bobholamovic/SimpleCV | f4edacf088d0155725a469e227de847820bdfa53 |
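A minimal sketch of using the HRNetEncoder above, assuming the simplecv package is installed and that its ConfigurableMixin exposes the config dict as attributes, as the property reads in the class imply; with pretrained=False nothing is downloaded, so the forward pass runs on random data.

import torch

encoder = HRNetEncoder()                      # falls back to defalut_config (hrnetv2_w18, no weights)
feats = encoder(torch.randn(1, 3, 224, 224))  # multi-resolution features from the underlying HRNet
print(encoder.output_channels())              # (18, 36, 72, 144) for hrnetv2_w18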
1.0 | from __future__ import division
from models import *
from utils.utils import *
from utils.datasets import *
import os
import sys
import time
import datetime
import argparse
from PIL import Image
import torch
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.ticker import NullLocator
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--image_folder", type=str, default="data/samples", help="path to dataset")
parser.add_argument("--model_def", type=str, default="config/yolov3-custom.cfg", help="path to model definition file")
parser.add_argument("--weights_path", type=str, default="checkpoints/yolov3.pth", help="path to weights file")
parser.add_argument("--class_path", type=str, default="data/custom/classes.names", help="path to class label file")
parser.add_argument("--conf_thres", type=float, default=0.9, help="object confidence threshold")
parser.add_argument("--nms_thres", type=float, default=0.4, help="iou thresshold for non-maximum suppression")
parser.add_argument("--batch_size", type=int, default=1, help="size of the batches")
parser.add_argument("--n_cpu", type=int, default=0, help="number of cpu threads to use during batch generation")
parser.add_argument("--img_size", type=int, default=416, help="size of each image dimension")
parser.add_argument("--checkpoint_model", type=str, help="path to checkpoint model")
opt = parser.parse_args()
print(opt)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
os.makedirs("output", exist_ok=True)
# Set up model
model = Darknet(opt.model_def, img_size=opt.img_size).to(device)
if opt.weights_path.endswith(".weights"):
# Load darknet weights
model.load_darknet_weights(opt.weights_path)
else:
# Load checkpoint weights
model.load_state_dict(torch.load(opt.weights_path))
model.eval() # Set in evaluation mode
dataloader = DataLoader(
ImageFolder(opt.image_folder, img_size=opt.img_size),
batch_size=opt.batch_size,
shuffle=False,
num_workers=opt.n_cpu,
)
classes = load_classes(opt.class_path) # Extracts class labels from file
Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
imgs = [] # Stores image paths
img_detections = [] # Stores detections for each image index
print("\nPerforming object detection:")
prev_time = time.time()
for batch_i, (img_paths, input_imgs) in enumerate(dataloader):
# Configure input
input_imgs = Variable(input_imgs.type(Tensor))
# Get detections
with torch.no_grad():
detections = model(input_imgs)
detections = non_max_suppression(detections, opt.conf_thres, opt.nms_thres)
# Log progress
current_time = time.time()
inference_time = datetime.timedelta(seconds=current_time - prev_time)
prev_time = current_time
print("\t+ Batch %d, Inference Time: %s" % (batch_i, inference_time))
# Save image and detections
imgs.extend(img_paths)
img_detections.extend(detections)
# Bounding-box colors
cmap = plt.get_cmap("tab20b")
colors = [cmap(i) for i in np.linspace(0, 1, 20)]
print("\nSaving images:")
# Iterate through images and save plot of detections
for img_i, (path, detections) in enumerate(zip(imgs, img_detections)):
print("(%d) Image: '%s'" % (img_i, path))
# Create plot
img = np.array(Image.open(path))
plt.figure()
plt.rcParams['font.sans-serif'] = ['SimHei']
fig, ax = plt.subplots(1)
ax.imshow(img)
# Draw bounding boxes and labels of detections
if detections is not None:
# Rescale boxes to original image
detections = rescale_boxes(detections, opt.img_size, img.shape[:2])
unique_labels = detections[:, -1].cpu().unique()
n_cls_preds = len(unique_labels)
bbox_colors = random.sample(colors, n_cls_preds)
for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:
print("\t+ Label: %s, Conf: %.5f" % (classes[int(cls_pred)], cls_conf.item()))
box_w = x2 - x1
box_h = y2 - y1
color = "red" # bbox_colors[int(np.where(unique_labels == int(cls_pred))[0])]
# Create a Rectangle patch
bbox = patches.Rectangle((x1, y1), box_w, box_h, linewidth=2, edgecolor=color, facecolor="none")
# Add the bbox to the plot
ax.add_patch(bbox)
# Add label
plt.text(
x1,
y1-40,
s=classes[int(cls_pred)],
color="white",
fontdict={"size": 10},
verticalalignment="top",
bbox={"color": color, "pad": 0},
)
# Save generated image with detections
plt.axis("off")
plt.gca().xaxis.set_major_locator(NullLocator())
plt.gca().yaxis.set_major_locator(NullLocator())
filename = path.split("/")[-1].split(".")[0]
plt.savefig(f"output/{filename}.png", bbox_inches="tight", pad_inches=0.0)
plt.close()
| [
"torch.no_grad",
"torch.cuda.is_available",
"torch.load"
] | 1.0 | ljx02/MLFinalWork | d0eb1551401035be5b8c12fe1fd3733aefcc652f |
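The script above is driven from the command line; the sketch below repeats the same inference steps on a single image. It uses the script's own default paths (which must exist in a local checkout), assumes the same wildcard imports (`from models import *`, `from utils.utils import *`) for Darknet and non_max_suppression, and skips the letterbox padding that the repo's ImageFolder normally applies, so it is an approximation rather than the exact pipeline.

import torch
from PIL import Image
from torchvision import transforms

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = Darknet("config/yolov3-custom.cfg", img_size=416).to(device)
model.load_state_dict(torch.load("checkpoints/yolov3.pth", map_location=device))
model.eval()

# "data/samples/dog.jpg" is a placeholder path; any RGB image works.
img = Image.open("data/samples/dog.jpg").convert("RGB").resize((416, 416))
x = transforms.ToTensor()(img).unsqueeze(0).to(device)
with torch.no_grad():
    detections = non_max_suppression(model(x), 0.9, 0.4)
# Each row of detections[0] is (x1, y1, x2, y2, conf, cls_conf, cls_pred) in 416x416 coordinates.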
0.27 | # Copyright 2021 MosaicML. All Rights Reserved.
"""Core MixUp classes and functions."""
from __future__ import annotations
import logging
from typing import Optional, Tuple
import numpy as np
import torch
from torch.nn import functional as F
from composer.core.types import Algorithm, Event, Logger, State, Tensor
from composer.models.loss import _check_for_index_targets
log = logging.getLogger(__name__)
__all__ = ["MixUp", "mixup_batch"]
def mixup_batch(input: Tensor,
target: Tensor,
num_classes: int,
mixing: Optional[float] = None,
alpha: float = 0.2,
indices: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""Create new samples using convex combinations of pairs of samples.
This is done by taking a convex combination of ``input`` with a randomly
permuted copy of ``input``. The permutation takes place along the sample
axis (dim 0).
The relative weight of the original ``input`` versus the permuted copy is
defined by the ``mixing`` parameter. This parameter should be chosen
from a ``Beta(alpha, alpha)`` distribution for some parameter ``alpha > 0``.
Note that the same ``mixing`` is used for the whole batch.
Args:
input (torch.Tensor): input tensor of shape ``(minibatch, ...)``, where
``...`` indicates zero or more dimensions.
target (torch.Tensor): target tensor of shape ``(minibatch, ...)``, where
``...`` indicates zero or more dimensions.
num_classes (int): total number of classes or output variables
mixing (float, optional): coefficient used to interpolate
between the two examples. If provided, must be in :math:`[0, 1]`.
If ``None``, value is drawn from a ``Beta(alpha, alpha)``
distribution. Default: ``None``.
alpha (float, optional): parameter for the Beta distribution over
``mixing``. Ignored if ``mixing`` is provided. Default: ``0.2``.
indices (Tensor, optional): Permutation of the samples to use.
Default: ``None``.
Returns:
input_mixed (torch.Tensor): batch of inputs after mixup has been applied
target_mixed (torch.Tensor): labels after mixup has been applied
perm (torch.Tensor): the permutation used
Example:
.. testcode::
import torch
from composer.functional import mixup_batch
N, C, H, W = 2, 3, 4, 5
num_classes = 10
X = torch.randn(N, C, H, W)
y = torch.randint(num_classes, size=(N,))
X_mixed, y_mixed, perm = mixup_batch(
X, y, num_classes=num_classes, alpha=0.2)
"""
if mixing is None:
mixing = _gen_mixing_coef(alpha)
# Create shuffled versions of x and y in preparation for interpolation
# Use given indices if there are any.
if indices is None:
shuffled_idx = torch.randperm(input.shape[0])
else:
shuffled_idx = indices
x_shuffled = input[shuffled_idx]
y_shuffled = target[shuffled_idx]
# Interpolate between the inputs
x_mix = (1 - mixing) * input + mixing * x_shuffled
# First check if labels are indices. If so, convert them to onehots.
# This is under the assumption that the loss expects torch.LongTensor, which is true for pytorch cross_entropy
if _check_for_index_targets(target):
y_onehot = F.one_hot(target, num_classes=num_classes)
y_shuffled_onehot = F.one_hot(y_shuffled, num_classes=num_classes)
y_mix = ((1. - mixing) * y_onehot + mixing * y_shuffled_onehot)
else:
y_mix = ((1. - mixing) * target + mixing * y_shuffled)
return x_mix, y_mix, shuffled_idx
class MixUp(Algorithm):
"""`MixUp <https://arxiv.org/abs/1710.09412>`_ trains the network on convex combinations of pairs of examples and
targets rather than individual examples and targets.
This is done by taking a convex combination of a given batch X with a
randomly permuted copy of X. The mixing coefficient is drawn from a
``Beta(alpha, alpha)`` distribution.
Training in this fashion sometimes reduces generalization error.
Args:
num_classes (int): the number of classes in the task labels.
alpha (float, optional): the psuedocount for the Beta distribution used to sample
mixing parameters. As ``alpha`` grows, the two samples
in each pair tend to be weighted more equally. As ``alpha``
approaches 0 from above, the combination approaches only using
one element of the pair. Default: ``0.2``.
Example:
.. testcode::
from composer.algorithms import MixUp
algorithm = MixUp(num_classes=10, alpha=0.2)
trainer = Trainer(
model=model,
train_dataloader=train_dataloader,
eval_dataloader=eval_dataloader,
max_duration="1ep",
algorithms=[algorithm],
optimizers=[optimizer]
)
"""
def __init__(self, num_classes: int, alpha: float = 0.2):
self.num_classes = num_classes
self.alpha = alpha
self.mixing = 0.0
self.indices = torch.Tensor()
def match(self, event: Event, state: State) -> bool:
"""Runs on Event.INIT and Event.AFTER_DATALOADER.
Args:
event (:class:`Event`): The current event.
state (:class:`State`): The current state.
Returns:
bool: True if this algorithm should run now.
"""
return event == Event.AFTER_DATALOADER
def apply(self, event: Event, state: State, logger: Logger) -> None:
"""Applies MixUp augmentation on State input.
Args:
event (Event): the current event
state (State): the current trainer state
logger (Logger): the training logger
"""
input, target = state.batch_pair
assert isinstance(input, Tensor) and isinstance(target, Tensor), \
"Multiple tensors for inputs or targets not supported yet."
self.mixing = _gen_mixing_coef(self.alpha)
new_input, new_target, self.indices = mixup_batch(
input,
target,
mixing=self.mixing,
num_classes=self.num_classes,
)
state.batch = (new_input, new_target)
def _gen_mixing_coef(alpha: float) -> float:
"""Samples ``max(z, 1-z), z ~ Beta(alpha, alpha)``."""
# First check if alpha is positive.
assert alpha >= 0
# Draw the mixing parameter from a beta distribution.
# Check here is needed because beta distribution requires alpha > 0
# but alpha = 0 is fine for mixup.
if alpha == 0:
mixing_lambda = 0
else:
mixing_lambda = np.random.beta(alpha, alpha)
# for symmetric beta distribution, can always use 0 <= lambda <= .5;
# this way the "main" label is always the original one, which keeps
# the training accuracy meaningful
return max(mixing_lambda, 1. - mixing_lambda)
| [
"torch.Tensor",
"torch.nn.functional.one_hot",
"torch.randperm"
] | 0.27 | stanford-crfm/composer | 4996fbd818971afd6439961df58b531d9b47a37b |
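A hand-worked check of the interpolation that mixup_batch performs, written with plain torch so it runs without composer; the mixing coefficient is fixed at 0.3 here instead of being drawn from a Beta(alpha, alpha) distribution.

import torch
import torch.nn.functional as F

mixing = 0.3
x = torch.tensor([[1.0, 0.0], [0.0, 1.0]])
y = torch.tensor([0, 1])
perm = torch.tensor([1, 0])                    # the permuted copy swaps the two samples

x_mix = (1 - mixing) * x + mixing * x[perm]    # [[0.7, 0.3], [0.3, 0.7]]
y_onehot = F.one_hot(y, num_classes=2).float()
y_mix = (1 - mixing) * y_onehot + mixing * y_onehot[perm]
assert torch.allclose(y_mix.sum(dim=1), torch.ones(2))   # soft labels still sum to 1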
1.5 | """Torch backend distributions"""
def get_TorchDeterministic():
from numbers import Number
import torch
from torch.distributions import constraints
from torch.distributions.kl import register_kl
from torch.distributions.utils import broadcast_all
class TorchDeterministic(torch.distributions.distribution.Distribution):
"""Deterministic distribution for PyTorch"""
arg_constraints = {"loc": constraints.real}
support = constraints.real
has_rsample = True
@property
def mean(self):
return self.loc
@property
def stddev(self):
return 0.0
@property
def variance(self):
return 0.0
def __init__(self, loc, validate_args=None):
self.loc = broadcast_all(loc)[0]
if isinstance(loc, Number):
batch_shape = torch.Size()
else:
batch_shape = self.loc.size()
super(TorchDeterministic, self).__init__(
batch_shape, validate_args=validate_args
)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(TorchDeterministic, _instance)
batch_shape = torch.Size(batch_shape)
new.loc = self.loc.expand(batch_shape)
super(TorchDeterministic, new).__init__(
batch_shape, validate_args=False
)
new._validate_args = self._validate_args
return new
def rsample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
ones = torch.ones(
shape, dtype=self.loc.dtype, device=self.loc.device
)
return self.loc * ones
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
return torch.log(value.eq(self.loc).type_as(self.loc))
def cdf(self, value):
if self._validate_args:
self._validate_sample(value)
result = value.gt(self.loc).type_as(self.loc)
return result.clamp(min=0, max=1)
def icdf(self, value):
if self._validate_args:
self._validate_sample(value)
result = value.lt(self.loc).type_as(self.loc)
return result
def entropy(self):
return torch.log(torch.zeros([1]))
@register_kl(
TorchDeterministic, torch.distributions.distribution.Distribution
)
def kl_deterministic_continuous(p, q):
return -q.log_prob(p.mean)
return TorchDeterministic
| [
"torch.Size",
"torch.zeros",
"torch.distributions.utils.broadcast_all",
"torch.ones",
"torch.distributions.kl.register_kl"
] | 1.5.0 | chiragnagpal/probflow | 1ba0619cd4f482a015cd25633d2f113d5d0f3476 |
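A short exercise of the factory above: rsample simply broadcasts loc to the requested shape, and log_prob is the log of an equality indicator (0 where the value matches loc, -inf elsewhere).

import torch

TorchDeterministic = get_TorchDeterministic()
d = TorchDeterministic(loc=torch.tensor([1.0, 2.0]))
print(d.rsample(torch.Size((3,))))            # shape (3, 2), every row equal to [1., 2.]
print(d.log_prob(torch.tensor([1.0, 3.0])))   # tensor([0., -inf])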
1.4 | import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
logger = logging.getLogger(__name__)
class SeqSoftmaxDecoder(nn.Module):
"""This class decodes sequence hidden unit
"""
def __init__(self, hidden_size, label_size, bias=True):
"""This function sets SeqSoftmaxDecoder input/output size
Arguments:
hidden_size {int} -- the size of hidden unit
label_size {int} -- the size of label
Keyword Arguments:
bias {bool} -- adding bias or not (default: {True})
"""
super().__init__()
self.hidden_size = hidden_size
self.label_size = label_size
self.hidden2label = nn.Linear(hidden_size, label_size, bias)
self.hidden2label.weight.data.normal_(mean=0.0, std=0.02)
if self.hidden2label.bias is not None:
self.hidden2label.bias.data.zero_()
self.loss = nn.CrossEntropyLoss()
def get_input_dim(self):
return self.hidden_size
def get_output_dim(self):
return self.label_size
def forward(self, seq_inputs, seq_mask=None, seq_labels=None):
"""This function propagates forwardly
Arguments:
seq_inputs {tensor} -- input data, shape: (batch_size, seq_size, input_size)
Keyword Arguments:
seq_mask {tensor} -- mask tensor, shape: (batch_size, seq_size) (default: {None})
seq_labels {tensor} -- label data, shape: (batch_size, seq_size) (default: {None})
Returns:
dict -- results: loss, predict, log_probs
"""
_, _, input_size = seq_inputs.size()
assert input_size == self.hidden_size, "input size is not equal to hidden size"
results = {}
seq_outpus = self.hidden2label(seq_inputs)
seq_log_probs = F.log_softmax(seq_outpus, dim=2)
seq_preds = seq_log_probs.argmax(dim=2)
results['predict'] = seq_preds
if seq_labels is not None:
if seq_mask is not None:
active_loss = seq_mask.view(-1) == 1
active_outputs = seq_outpus.view(-1, self.label_size)[active_loss]
active_labels = seq_labels.view(-1)[active_loss]
no_pad_avg_loss = self.loss(active_outputs, active_labels)
results['loss'] = no_pad_avg_loss
else:
avg_loss = self.loss(seq_outpus.view(-1, self.label_size), seq_labels.view(-1))
results['loss'] = avg_loss
return results
| [
"torch.nn.Linear",
"torch.nn.CrossEntropyLoss",
"torch.nn.functional.log_softmax"
] | 1.4.0 | Receiling/ENPAR | decd2945d21a7be5a0f73c37cfc5e252301aab15 |
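A quick smoke test of the SeqSoftmaxDecoder above: two sequences of length 5 with hidden size 8 and 3 labels, where the tail of the second sequence is masked out so it does not contribute to the loss.

import torch

decoder = SeqSoftmaxDecoder(hidden_size=8, label_size=3)
seq_inputs = torch.randn(2, 5, 8)
seq_labels = torch.randint(0, 3, (2, 5))
seq_mask = torch.tensor([[1, 1, 1, 1, 1],
                         [1, 1, 1, 0, 0]])
out = decoder(seq_inputs, seq_mask=seq_mask, seq_labels=seq_labels)
print(out['predict'].shape)    # torch.Size([2, 5])
print(out['loss'])             # cross-entropy averaged over the 8 unmasked positions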
1.6 | # Differentiable Augmentation for Data-Efficient GAN Training
# Shengyu Zhao, Zhijian Liu, Ji Lin, Jun-Yan Zhu, and Song Han
# https://arxiv.org/pdf/2006.10738
from random import random
import torch
import torch.nn as nn
import torch.nn.functional as F
class DiffAugment(nn.Module):
def __init__(self, policy='', channels_first=True):
super(DiffAugment, self).__init__()
self.policy = policy
self.channels_first = channels_first
def forward(self, x):
if self.policy:
if not self.channels_first:
x = x.permute(0, 3, 1, 2)
for p in self.policy.split(','):
for f in AUGMENT_FNS[p]:
x = f(x)
if not self.channels_first:
x = x.permute(0, 2, 3, 1)
x = x.contiguous()
return x
def rand_brightness(x):
x = x + (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) - 0.5)
return x
def rand_saturation(x):
x_mean = x.mean(dim=1, keepdim=True)
x = (x - x_mean) * (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) * 2) + x_mean
return x
def rand_contrast(x):
x_mean = x.mean(dim=[1, 2, 3], keepdim=True)
x = (x - x_mean) * (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) + 0.5) + x_mean
return x
def rand_translation(x, ratio=0.125):
shift_x, shift_y = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)
translation_x = torch.randint(-shift_x, shift_x + 1, size=[x.size(0), 1, 1], device=x.device)
translation_y = torch.randint(-shift_y, shift_y + 1, size=[x.size(0), 1, 1], device=x.device)
grid_batch, grid_x, grid_y = torch.meshgrid(
torch.arange(x.size(0), dtype=torch.long, device=x.device),
torch.arange(x.size(2), dtype=torch.long, device=x.device),
torch.arange(x.size(3), dtype=torch.long, device=x.device),
)
grid_x = torch.clamp(grid_x + translation_x + 1, 0, x.size(2) + 1)
grid_y = torch.clamp(grid_y + translation_y + 1, 0, x.size(3) + 1)
x_pad = F.pad(x, [1, 1, 1, 1, 0, 0, 0, 0])
x = x_pad.permute(0, 2, 3, 1).contiguous()[grid_batch, grid_x, grid_y].permute(0, 3, 1, 2)
return x
def rand_cutout(x, ratio=0.5):
cutout_size = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)
offset_x = torch.randint(0, x.size(2) + (1 - cutout_size[0] % 2), size=[x.size(0), 1, 1], device=x.device)
offset_y = torch.randint(0, x.size(3) + (1 - cutout_size[1] % 2), size=[x.size(0), 1, 1], device=x.device)
grid_batch, grid_x, grid_y = torch.meshgrid(
torch.arange(x.size(0), dtype=torch.long, device=x.device),
torch.arange(cutout_size[0], dtype=torch.long, device=x.device),
torch.arange(cutout_size[1], dtype=torch.long, device=x.device),
)
grid_x = torch.clamp(grid_x + offset_x - cutout_size[0] // 2, min=0, max=x.size(2) - 1)
grid_y = torch.clamp(grid_y + offset_y - cutout_size[1] // 2, min=0, max=x.size(3) - 1)
mask = torch.ones(x.size(0), x.size(2), x.size(3), dtype=x.dtype, device=x.device)
mask[grid_batch, grid_x, grid_y] = 0
x = x * mask.unsqueeze(1)
return x
def rand_upscale(x, ratio=0.25):
up_ratio = 1.0 + ratio * random()
sz = x.size(2)
x = torch.nn.functional.interpolate(x, scale_factor=up_ratio, mode='bilinear')
return center_crop(x, sz)
def center_crop(x, sz):
h, w = x.size(2), x.size(3)
x1 = int(round((h - sz) / 2.))
y1 = int(round((w - sz) / 2.))
return x[:, :, x1:x1+sz, y1:y1+sz]
AUGMENT_FNS = {
'color': [rand_brightness, rand_saturation, rand_contrast],
'translation': [rand_translation],
'cutout': [rand_cutout],
'upscale': [rand_upscale],
}
| [
"torch.nn.functional.pad",
"torch.nn.functional.interpolate",
"torch.arange"
] | 1.6.0 | AyushExel/GANSketching | c72524ac4425de898087af7a4c554b777a4e2218 |
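Applying the module above to a random batch: the policy string selects comma-separated keys of AUGMENT_FNS, each selected op is shape-preserving, and all of them are differentiable so gradients flow back to the input.

import torch

aug = DiffAugment(policy='color,translation,cutout')
x = torch.rand(4, 3, 64, 64, requires_grad=True)
y = aug(x)
assert y.shape == x.shape      # brightness/saturation/contrast, translation and cutout keep the shape
y.sum().backward()             # differentiable augmentation: x.grad is populated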
1.7 | ################################################################################
# Code from
# https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py
# Modified the original code so that it also loads images from the current
# directory as well as the subdirectories
################################################################################
import h5py
import torch.utils.data as data
import pickle
import numpy as np
import torch
import os
import os.path
import sys
import math, random
import skimage
from skimage.transform import resize
from skimage import io
from skimage.transform import rotate
IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.webp']  # assumed: the torchvision defaults; the snippet never defines this constant
def is_image_file(filename):
    return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def rgb_to_chromaticity(rgb):
""" converts rgb to chromaticity """
irg = np.zeros_like(rgb)
s = np.sum(rgb, axis=-1) + 1e-8
    # per-pixel sum across the RGB channels
irg[..., 0] = rgb[..., 0] / s
irg[..., 1] = rgb[..., 1] / s
irg[..., 2] = rgb[..., 2] / s
return irg
def make_dataset(list_dir):
file_name = list_dir + "img_batch.p"
images_list = pickle.load( open( file_name, "rb" ) )
return images_list
# This is Image loader for unlabel video clips
class ImageFolder(data.Dataset):
def __init__(self, root, list_dir, transform=None,
loader=None):
# load image list from hdf5
img_list = make_dataset(list_dir)
if len(img_list) == 0:
raise(RuntimeError("Found 0 images in: " + root + "\n"
"Supported image extensions are: " + ",".join(IMG_EXTENSIONS)))
self.root = root
self.list_dir = list_dir
self.img_list = img_list
self.transform = transform
x = np.arange(-1, 2)
y = np.arange(-1, 2)
self.X, self.Y = np.meshgrid(x, y)
self.num_scale = 4
self.sigma_chro = 0.025
self.sigma_I = 0.12
self.half_window = 1
self.rotation_range = 5
self.input_height = 256
self.input_width = 384
def construst_sub_L(self, L):
h = L.shape[0]
w = L.shape[1]
sub_L = np.zeros( (9 ,L.shape[0]-2,L.shape[1]-2))
ct_idx = 0
for k in range(0, self.half_window*2+1):
for l in range(0,self.half_window*2+1):
sub_L[ct_idx,:,:] = L[self.half_window + self.Y[k,l]:h- self.half_window + self.Y[k,l], \
self.half_window + self.X[k,l]: w-self.half_window + self.X[k,l]]
ct_idx += 1
return sub_L
def construst_sub_C(self, C):
h = C.shape[0]
w = C.shape[1]
sub_C = np.zeros( (9 ,C.shape[0]-2,C.shape[1]-2, 2))
ct_idx = 0
for k in range(0, self.half_window*2+1):
for l in range(0,self.half_window*2+1):
sub_C[ct_idx,:,:,:] = C[self.half_window + self.Y[k,l]:h- self.half_window + self.Y[k,l], \
self.half_window + self.X[k,l]: w-self.half_window + self.X[k,l] , 0:2]
ct_idx += 1
return sub_C
def construst_R_weights(self, N_c_0, N_L_0):
center_c = np.repeat( np.expand_dims(N_c_0[4, :, :,:], axis =0), 9, axis = 0)
center_I = np.repeat( np.expand_dims(N_L_0[4, :, :], axis =0), 9, axis = 0)
chro_diff = center_c - N_c_0
I_diff = center_I - N_L_0
r_w = np.exp( - np.sum( chro_diff**2 , 3) / (self.sigma_chro**2)) * np.exp(- (I_diff**2) /(self.sigma_I**2) )
return r_w
def DA(self, img, mode, random_pos, random_filp, random_angle, input_height, input_width):
if random_filp > 0.5:
img = np.fliplr(img)
img = img[random_pos[0]:random_pos[1], random_pos[2]:random_pos[3], :]
img = rotate(img, random_angle, order = mode)
img = resize(img, (input_height, input_width), order = mode)
return img
def generate_random_parameters(self, img):
original_h, original_w = img.shape[0], img.shape[1]
# 720 1280
# rotation_range=5
        # random() returns a random float in the range [0, 1).
random_angle = random.random() * self.rotation_range * 2.0 - self.rotation_range # random angle between -5 --- 5 degree
random_filp = random.random()
ratio = float(original_w)/float(original_h)
random_resize = random.random()
if ratio > 1.4142:
random_start_y = random.randint(0, 9)
random_start_x = random.randint(0, 29)
random_pos = [random_start_y, random_start_y + original_h - 10, random_start_x, random_start_x + original_w - 30]
input_height, input_width = 256, 384
elif ratio < 1./1.4142:
random_start_y = random.randint(0, 29)
random_start_x = random.randint(0, 9)
random_pos = [random_start_y, random_start_y + original_h - 30, random_start_x, random_start_x + original_w - 10]
input_height, input_width = 384, 256
elif ratio > 1.2247:
random_start_y = random.randint(0, 29)
random_start_x = random.randint(0, 9)
random_pos = [random_start_y, random_start_y + original_h - 30, random_start_x, random_start_x + original_w - 10]
input_height, input_width = 256, 384
elif ratio < 1./ 1.2247:
random_start_y = random.randint(0, 9)
random_start_x = random.randint(0, 29)
random_pos = [random_start_y, random_start_y + original_h - 10, random_start_x, random_start_x + original_w - 30]
input_height, input_width = 384, 256
else:
random_start_y = random.randint(0, 9)
random_start_x = random.randint(0, 9)
random_pos = [random_start_y, random_start_y + original_h - 10, random_start_x, random_start_x + original_w - 10]
input_height, input_width = 256, 256
return random_angle, random_filp, random_pos, input_height, input_width
# 【-5,5】;【0,1】;x ;256 ;384
def __getitem__(self, index):
targets_1 = {}
targets_1['path'] = []
        sparse_path_1s = [] # note: the three variables below are never used
sparse_path_1r = [] #
temp_targets = {} #
path_list = self.img_list[index]
folder_id = path_list[0].split('/')[0]
# number of images in one sequence
num_imgs = len(path_list)
num_channel = (self.half_window*2+1)**2
# sample image
img_name = path_list[0].split('/')[-1]
img_path = self.root + str(folder_id) + "/data/" + img_name
# load original image
srgb_img = np.float32(io.imread(img_path))/ 255.0
original_h, original_w = srgb_img.shape[0], srgb_img.shape[1]
random_angle, random_filp, random_pos, input_height, input_width = self.generate_random_parameters(srgb_img)
# image intensity profiles across the sequence
local_intensity_profiles = [None, None, None,None]
local_intensity_profiles[0] = np.zeros( (num_imgs, num_channel, input_height-self.half_window*2, input_width-self.half_window*2) )
        local_intensity_profiles[1] = np.zeros( (num_imgs, num_channel, input_height//2-self.half_window*2, input_width//2-self.half_window*2) )
        local_intensity_profiles[2] = np.zeros( (num_imgs, num_channel, input_height//4-self.half_window*2, input_width//4-self.half_window*2) )
        local_intensity_profiles[3] = np.zeros( (num_imgs, num_channel, input_height//8-self.half_window*2, input_width//8-self.half_window*2) )
# random permutation
#random_image_list = np.random.permutation(num_imgs)
# for each image in the sequence
for i in range(num_imgs):
img_name = path_list[i].split('/')[-1]
img_path = self.root + str(folder_id) + "/data/" + img_name
# load original image
srgb_img = np.float32(io.imread(img_path))/ 255.0
mask_path = self.root + str(folder_id) + "/data/" + img_name[:-4] + "_mask.png"
# load mask
mask = np.float32(io.imread(mask_path))/ 255.0
mask = np.expand_dims(mask, axis = 2)
mask = np.repeat(mask, 3, axis= 2)
# do data augmentation
assert(mask.shape[0] == srgb_img.shape[0])
assert(mask.shape[1] == srgb_img.shape[1])
srgb_img = self.DA(srgb_img, 1, random_pos, random_filp, random_angle, input_height, input_width)
mask = self.DA(mask, 0, random_pos, random_filp, random_angle, input_height, input_width)
# sky_mask = self.DA(sky_mask, 0, random_pos, random_filp, random_angle, input_height, input_width)
# 0.0001
srgb_img[srgb_img < 1e-4] = 1e-4
rgb_img = srgb_img**2.2
rgb_img[rgb_img < 1e-4] = 1e-4
chromaticity = rgb_to_chromaticity(rgb_img)
L0 = np.mean(rgb_img, 2)
for l in range(self.num_scale):
N_c_0 = self.construst_sub_C(chromaticity)
N_L_0 = self.construst_sub_L(L0)
r_w_s= self.construst_R_weights(N_c_0, N_L_0)
if ('r_w_s'+ str(l)) not in targets_1:
targets_1['r_w_s'+ str(l)] = torch.from_numpy(r_w_s).float().unsqueeze(0)
targets_1['mask_' + str(l)] = torch.from_numpy(np.transpose(mask, (2, 0, 1))).float().unsqueeze(0)
else:
targets_1['r_w_s'+ str(l)] = torch.cat( ( targets_1['r_w_s'+ str(l)], \
torch.from_numpy(r_w_s).float().unsqueeze(0)), 0)
targets_1['mask_' + str(l)] = torch.cat( (targets_1['mask_' + str(l)], \
torch.from_numpy(np.transpose(mask, (2, 0, 1))).float().unsqueeze(0)), 0)
local_intensity_profiles[l][i,:,:,:] = N_L_0
L0 = L0[::2,::2]
chromaticity = chromaticity[::2,::2,:]
mask = mask[::2,::2,:]
# create mask
if 'rgb_img' not in targets_1:
# targets_1['mask_0'] = torch.from_numpy(mask).float().unsqueeze(0)
targets_1['rgb_img'] = torch.from_numpy( np.transpose(rgb_img, (2, 0, 1)) ).unsqueeze(0).contiguous().float()
final_img = torch.from_numpy(np.transpose(srgb_img, (2, 0, 1))).unsqueeze(0).contiguous().float()
else:
# targets_1['mask_0'] = torch.cat( (targets_1['mask_0'], torch.from_numpy(mask).float().unsqueeze(0)), 0)
targets_1['rgb_img'] = torch.cat( (targets_1['rgb_img'], torch.from_numpy(np.transpose(rgb_img, (2, 0, 1))).float().unsqueeze(0)),0)
final_img = torch.cat( (final_img, torch.from_numpy(np.transpose(srgb_img, (2, 0, 1))).unsqueeze(0).contiguous().float()),0)
k1 = 20.0
k2 = 4.0
weight = 12.0
offset = 1.0/weight
# compute median of Intensity profiles for each scale
for l in range(0, self.num_scale):
intensity_profiles = local_intensity_profiles[l]
log_ratio_profiles = np.log( np.repeat( np.expand_dims( intensity_profiles[:,4,:,:], 1) , 9, axis = 1)) - np.log(intensity_profiles)
median_ratio = np.median(log_ratio_profiles, axis = 0)
median_ratio = np.repeat( np.expand_dims(median_ratio, 0), num_imgs, axis = 0)
relative_changes = (log_ratio_profiles - median_ratio)/(median_ratio + 1e-6)
sw_1 = np.exp(- k1 * (log_ratio_profiles - median_ratio)**2 )
sw_2 = np.exp(- k2 * (relative_changes)**2 )
shading_w = np.maximum(sw_1, sw_2)
shading_w = torch.from_numpy(shading_w).float()
R_w = targets_1['r_w_s' + str(l)]
R_w, index = torch.median(R_w, 0)
R_w = 1 - R_w.unsqueeze(0).repeat(shading_w.size(0), 1,1,1)
shading_w = torch.mul(offset + R_w, shading_w)
targets_1['shading_w_'+str(l)] = weight * shading_w
return final_img, targets_1, sparse_path_1r
def __len__(self):
return len(self.img_list)
class IIW_ImageFolder(data.Dataset):
def __init__(self, root, list_dir, mode, is_flip, transform=None,
loader=None):
# load image list from hdf5
img_list = make_dataset(list_dir)
if len(img_list) == 0:
raise(RuntimeError("Found 0 images in: " + root + "\n"
"Supported image extensions are: " + ",".join(IMG_EXTENSIONS)))
self.root = root
self.list_dir = list_dir
self.img_list = img_list
self.transform = transform
self.loader = loader
self.num_scale = 4
self.sigma_chro = 0.025
self.sigma_I = 0.1
self.half_window = 1
self.current_o_idx = mode
self.set_o_idx(mode)
x = np.arange(-1, 2)
y = np.arange(-1, 2)
self.X, self.Y = np.meshgrid(x, y)
def set_o_idx(self, o_idx):
self.current_o_idx = o_idx
if o_idx == 0:
self.height = 256
self.width = 384
elif o_idx == 1:
self.height = 384
self.width = 256
elif o_idx == 2:
self.height = 256
self.width = 256
elif o_idx == 3:
self.height = 384
self.width = 512
else:
self.height = 512
self.width = 384
def iiw_loader(self, img_path):
img_path = img_path[-1][:-3]
img_path = self.root + img_path
img = np.float32(io.imread(img_path))/ 255.0
oringinal_shape = img.shape
img = resize(img, (self.height, self.width))
return img, oringinal_shape
def __getitem__(self, index):
targets_1 = {}
img_id = self.img_list[self.current_o_idx][index].split('/')[-1][0:-6]
judgement_path = self.root + img_id + 'json'
img, oringinal_shape = self.iiw_loader(self.img_list[self.current_o_idx][index].split('/'))
targets_1['path'] = self.img_list[self.current_o_idx][index]
targets_1["judgements_path"] = judgement_path
targets_1["oringinal_shape"] = oringinal_shape
final_img = torch.from_numpy(np.ascontiguousarray(np.transpose(img, (2,0,1)))).contiguous().float()
return final_img, targets_1
def __len__(self):
return len(self.img_list[self.current_o_idx])
| [
"torch.mul",
"torch.median",
"torch.from_numpy"
] | 1.7.0 | mckaymckay/Unsupervised-learning | 46d627fc5b24043ebb6f2e3b4388b66be3ece7d5 |
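A small numeric check of rgb_to_chromaticity defined above: each channel is divided by the per-pixel channel sum (plus 1e-8), so the chromaticity channels sum to roughly one.

import numpy as np

rgb = np.array([[[0.1, 0.2, 0.7]], [[0.5, 0.5, 0.0]]], dtype=np.float32)
chroma = rgb_to_chromaticity(rgb)
print(chroma.sum(axis=-1))     # approximately [[1.], [1.]]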