version | code | apis | full_version | repo_name | hexsha
---|---|---|---|---|---|
1.8 | import numpy as np
import torch
from discrete_network.network import KNNet, KNNetParameters, KNNetState
from discrete_network.method.force_method import ForceParameters, ForceLearn
from discrete_network.device import device
import matplotlib.pyplot as plt
print(f"Device = {device.type}")
# params_spiking = KNNetParameters(eps = 0.015, beta = 0.0, d = 0.26, a = 0.25, J = 0.1081 + 0.1)
# params_spiking = KNNetParameters(eps = 0.015, beta = 0.03, d = 0.26, a = 0.25, J = 0.1081 + 0.1)
# params_spiking = KNNetParameters(eps = 0.015, beta = 0.05, d = 0.26, a = 0.25, J = 0.15)
# normal spike
# params_spiking = KNNetParameters(eps = 0.02, beta = 0.0, d = 0.26, a = 0.25, J = 0.1081 + 0.1)
# params_spiking = KNNetParameters(eps = 0.03, beta = 0.035, d = 0.26, a = 0.25, J = 0.1081 + 0.1)
def one_neuron(x0, y0, iteration, p: KNNetParameters):
"""The dynamics of one neuron. Return x, y."""
x, y = np.zeros(iteration), np.zeros(iteration)
x[0], y[0] = x0, y0
for i in range(iteration - 1):
x[i + 1] = (
x[i]
+ x[i] * (x[i] - p.a) * (1 - x[i])
- p.beta * (x[i] > p.d)
- y[i]
)
y[i + 1] = y[i] + p.eps * (x[i] - p.J)
return x, y
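# Added illustrative sketch (not in the original script): a short single-neuron
# trajectory using one of the spiking parameter sets listed in the commented-out
# lines above; the eps/beta/d/a/J values below are taken from those comments.
_demo_params = KNNetParameters(eps=0.02, beta=0.0, d=0.26, a=0.25, J=0.1081 + 0.1)
_demo_x, _demo_y = one_neuron(0.3, 0.0, 2000, _demo_params)
# plt.plot(_demo_x); plt.xlabel("iteration"); plt.ylabel("x"); plt.show()  # uncomment to visualize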
imin = 0; icrit = 20000; nt = 21000
input_size = 0
hidden_size = 2000
output_size = 2
eps_start = 0.01
eps_stop = 0.1
eps = eps_start + (eps_stop - eps_start) * torch.rand(hidden_size, 1).to(device)
a = 0.25
J = (1 + a - torch.sqrt(1 + a * a - a + 3 * eps)) / 3
J = J.to(device)
p = KNNetParameters(
eps=eps, a=torch.as_tensor(a), J=J, q=1.1, g=0.1, x_th=torch.as_tensor(0.65),
beta=torch.as_tensor(0.0)
)
bifparams = []
bifparams_second = []
for i in np.arange(0.03, 0.04, 0.001):
for j in np.arange(0.025, 0.1, 0.002):
params_spiking = KNNetParameters(eps = j, beta = i, d = 0.26, a = 0.25, J = 0.1081 + 0.1)
f_out_x, f_out_y = one_neuron(0.3, 0, nt, params_spiking)
f_out = np.concatenate([[f_out_x], [f_out_y]], 0).T
x_initial = 0.9 * torch.rand(hidden_size, 1).to(device)
y_initial = torch.zeros(hidden_size, 1).to(device)
z_initial = torch.zeros(hidden_size, 1).to(device)
ISPC_initial = torch.zeros(hidden_size, 1).to(device)
initial_state = KNNetState(x=x_initial, y=y_initial, z=z_initial, ISPC=ISPC_initial)
net = KNNet(input_size, hidden_size, output_size, p=p)
net.to_device(device)
lp = ForceParameters(stop_learning=icrit, start_learning=imin)
fl = ForceLearn(net=net, lp=lp, save_states=True)
train_logs, states = fl.train(target_outputs=f_out, state=initial_state)
L2 = torch.linalg.norm(train_logs[-1000:, 0, 0] - f_out[-1000:, 0])
L2_second = torch.linalg.norm(train_logs[-1000:, 1, 0] - f_out[-1000:, 1])
print(torch.log(L2))
bifparams.append([i, j, torch.log(L2).item()])
bifparams_second.append([i, j, torch.log(L2_second).item()])
print(f'1dim: {bifparams[-1]}, 2dim: {bifparams_second[-1]}')
bifparams = np.array(bifparams)
bifparams_second = np.array(bifparams_second)
np.save('./betaeps_3', bifparams)
np.save('./betaeps_second_3', bifparams_second) | [
"torch.rand",
"torch.sqrt",
"torch.zeros",
"torch.linalg.norm",
"torch.as_tensor",
"torch.log"
] | 1.8.2 | aw02m/Spiking_neural_networks | 4c23c50b52b15a9e5709cb672fd18cd22218b9f2 |
1.7 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
created by Halo 2020/10/28 11:28
https://tangshusen.me/Dive-into-DL-PyTorch/#/chapter03_DL-basics/3.12_weight-decay
"""
import torch
import torch.nn as nn
import numpy as np
import mytorch.d2lzh_pytorch as d2l
n_train, n_test, num_inputs = 20, 100, 200
true_w, true_b = torch.ones(num_inputs, 1) * 0.01, 0.05
features = torch.randn((n_train + n_test, num_inputs))
labels = torch.matmul(features, true_w) + true_b
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)
train_features, test_features = features[:n_train, :], features[n_train:, :]
train_labels, test_labels = labels[:n_train], labels[n_train:]
def init_params():
w = torch.randn((num_inputs, 1), requires_grad=True)
b = torch.zeros(1, requires_grad=True)
return [w, b]
def l2_penalty(w):
return (w ** 2).sum() / 2
batch_size, num_epochs, lr = 1, 100, 0.003
net, loss = d2l.linreg, d2l.squared_loss
dataset = torch.utils.data.TensorDataset(train_features, train_labels)
train_iter = torch.utils.data.DataLoader(dataset, batch_size, shuffle=True)
def fit_and_plot(lambd):
w, b = init_params()
train_ls, test_ls = [], []
for _ in range(num_epochs):
for X, y in train_iter:
l = loss(net(X, w, b), y) + lambd * l2_penalty(w)
l = l.sum()
if w.grad is not None:
w.grad.data.zero_()
b.grad.data.zero_()
l.backward()
d2l.sgd([w, b], lr, batch_size)
train_ls.append(loss(net(train_features, w, b), train_labels).mean().item())
test_ls.append(loss(net(test_features, w, b), test_labels).mean().item())
d2l.semilogy(range(1, num_epochs + 1), train_ls, 'epochs', 'loss',
range(1, num_epochs + 1), test_ls, ['train', 'test'])
print('L2 norm of w:', w.norm().item())
# Weight decay can be specified via the weight_decay hyperparameter of the optimizer.
def fit_and_plot_pytorch(wd):
net = nn.Linear(num_inputs, 1)
nn.init.normal_(net.weight, mean=0, std=1)
nn.init.normal_(net.bias, mean=0, std=1)
optimizer_w = torch.optim.SGD(params=[net.weight], lr=lr, weight_decay=wd)  # apply weight decay to the weight parameters
optimizer_b = torch.optim.SGD(params=[net.bias], lr=lr)  # no weight decay on the bias parameter
train_ls, test_ls = [], []
for _ in range(num_epochs):
for X, y in train_iter:
l = loss(net(X), y).mean()
optimizer_w.zero_grad()
optimizer_b.zero_grad()
l.backward()
# Call step() on each of the two optimizer instances, updating the weight and the bias separately
optimizer_w.step()
optimizer_b.step()
train_ls.append(loss(net(train_features), train_labels).mean().item())
test_ls.append(loss(net(test_features), test_labels).mean().item())
d2l.semilogy(range(1, num_epochs + 1), train_ls, 'epochs', 'loss',
range(1, num_epochs + 1), test_ls, ['train', 'test'])
print('L2 norm of w:', net.weight.data.norm().item())
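# Added sketch (not part of the original tutorial code): the same selective weight
# decay can also be expressed with a single optimizer by using per-parameter-group
# options, instead of the two optimizers above.
def fit_and_plot_pytorch_single_optimizer(wd):
    net = nn.Linear(num_inputs, 1)
    nn.init.normal_(net.weight, mean=0, std=1)
    nn.init.normal_(net.bias, mean=0, std=1)
    optimizer = torch.optim.SGD([
        {'params': [net.weight], 'weight_decay': wd},  # decay only the weight
        {'params': [net.bias]},                        # no decay on the bias
    ], lr=lr)
    for _ in range(num_epochs):
        for X, y in train_iter:
            l = loss(net(X), y).mean()
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
    print('L2 norm of w:', net.weight.data.norm().item())
# fit_and_plot_pytorch_single_optimizer(3)  # uncomment to compare with the two-optimizer version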
fit_and_plot(lambd=0)
fit_and_plot(lambd=3)
fit_and_plot_pytorch(0)
fit_and_plot_pytorch(3)
| [
"torch.zeros",
"torch.nn.Linear",
"torch.optim.SGD",
"torch.ones",
"torch.randn",
"torch.nn.init.normal_",
"torch.utils.data.DataLoader",
"torch.matmul",
"torch.utils.data.TensorDataset"
] | 1.7.0 | Halo1236/Dive-into-DL-PyTorch | 586b4e9ca77b2121ce5f5bec8b0a893b33f1b574 |
1.4 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from torch.nn import functional as F
from detectron2.layers import paste_masks_in_image
from detectron2.structures import Instances
def detector_postprocess(results, output_height, output_width, mask_threshold=0.5):
"""
Resize the output instances.
The input images are often resized when entering an object detector.
As a result, we often need the outputs of the detector in a different
resolution from its inputs.
This function will resize the raw outputs of an R-CNN detector
to produce outputs according to the desired output resolution.
Args:
results (Instances): the raw outputs from the detector.
`results.image_size` contains the input image resolution the detector sees.
This object might be modified in-place.
output_height, output_width: the desired output resolution.
Returns:
Instances: the resized output from the model, based on the output resolution
"""
scale_x, scale_y = (output_width / results.image_size[1], output_height / results.image_size[0])
results = Instances((output_height, output_width), **results.get_fields())
if results.has("pred_boxes"):
output_boxes = results.pred_boxes
elif results.has("proposal_boxes"):
output_boxes = results.proposal_boxes
output_boxes.scale(scale_x, scale_y)
output_boxes.clip(results.image_size)
results = results[output_boxes.nonempty()]
if results.has("pred_masks"):
results.pred_masks = paste_masks_in_image(
results.pred_masks[:, 0, :, :], # N, 1, M, M
results.pred_boxes,
results.image_size,
threshold=mask_threshold,
)
if results.has("pred_keypoints"):
results.pred_keypoints[:, :, 0] *= scale_x
results.pred_keypoints[:, :, 1] *= scale_y
return results, output_boxes.nonempty()
def sem_seg_postprocess(result, img_size, output_height, output_width):
"""
Return semantic segmentation predictions in the original resolution.
The input images are often resized when entering the semantic segmentor. Moreover, in some
cases they are also padded inside the segmentor to be divisible by the maximum network stride.
As a result, we often need the predictions of the segmentor in a different
resolution from its inputs.
Args:
result (Tensor): semantic segmentation prediction logits. A tensor of shape (C, H, W),
where C is the number of classes, and H, W are the height and width of the prediction.
img_size (tuple): image size that segmentor is taking as input.
output_height, output_width: the desired output resolution.
Returns:
semantic segmentation prediction (Tensor): A tensor of the shape
(C, output_height, output_width) that contains per-pixel soft predictions.
"""
result = result[:, : img_size[0], : img_size[1]].expand(1, -1, -1, -1)
result = F.interpolate(
result, size=(output_height, output_width), mode="bilinear", align_corners=False
)[0]
return result
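if __name__ == "__main__":
    # Added sketch (not part of the original module): resize a dummy 3-class logit
    # map from a padded input back to a desired output resolution; all shapes here
    # are illustrative only.
    import torch
    logits = torch.randn(3, 64, 64)                        # (C, H, W), padded to 64x64
    out = sem_seg_postprocess(logits, (50, 60), 100, 120)  # crop padding, then upsample
    print(out.shape)                                       # torch.Size([3, 100, 120])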
| [
"torch.nn.functional.interpolate"
] | 1.4.0 | aleSuglia/py-bottom-up-attention | a97142ad3526c11272c471ee7d610494f1247b7b |
1.0 | """Training utilities."""
import os
from typing import Any, Dict, Union
import pytorch_lightning as pl
import torch
from loguru import logger
from pytorch_lightning.callbacks.base import Callback
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from pytorch_lightning.loggers import WandbLogger
from .dataset import (
DATASETS,
EnzymaticReactionDataset,
EnzymaticReactionLightningDataModule,
)
from .model import EnzymaticReactionLightningModule
def get_data_module(
dataset_args: Dict[str, Union[float, str, int]],
) -> EnzymaticReactionLightningDataModule:
"""
Get a data module for enzymatic reactions.
Args:
dataset_args: dictionary containing all the necessary parameters for the dataset creation.
Returns:
data module for enzymatic reactions.
"""
return EnzymaticReactionLightningDataModule(
dataset_args,
DATASETS.get(
str(dataset_args.get("dataset_type", "enzymatic")), EnzymaticReactionDataset
),
)
def train(
model_args: Dict[str, Union[float, str, int]],
model_architecture: Dict[str, Union[float, str, int]],
dataset_args: Dict[str, Union[float, str, int]],
trainer_args: Dict[str, Any],
) -> None:
"""
Train a model.
Args:
model_args: dictionary containing all the parameters for the model configuration.
model_architecture: dictionary containing the information related to the architecture of the model.
dataset_args: dictionary containing all the necessary parameters for the dataset creation.
trainer_args: dictionary containing all the necessary parameters for the training routine.
"""
data_module = get_data_module(dataset_args)
model_architecture["vocab_size"] = data_module.train_dataset.tokenizer.vocab_size
model = EnzymaticReactionLightningModule(model_args, model_architecture)
log_dir = trainer_args["log_dir"]
os.makedirs(log_dir, exist_ok=True)
del trainer_args["log_dir"]
lightning_logger = WandbLogger(
name="mlm-logger", save_dir=log_dir, log_model=True, project="rxn-aa-mapper"
)
trainer_args["logger"] = lightning_logger
if not torch.cuda.is_available():
del trainer_args["gpus"]
if not isinstance(trainer_args["val_check_interval"], int):
trainer_args["val_check_interval"] = 10000
logger.warning(
f"please set trainer['val_check_interval'] to an integer value, defaulting to {trainer_args['val_check_interval']}"
)
if (
"accelerator" not in trainer_args
or trainer_args.get("accelerator", "ddp") == "ddp_spawn"
):
trainer_args["accelerator"] = "ddp"
logger.warning(
f"ddp_spawn not supported because of pickle issues, defaulting to {trainer_args['accelerator']}"
)
# gather the callbacks
trainer_args["callbacks"] = []
if "early_stopping_callback" in trainer_args:
callback: Callback = EarlyStopping(**trainer_args["early_stopping_callback"])
del trainer_args["early_stopping_callback"]
trainer_args["callbacks"].append(callback)
if "model_checkpoint_callback" in trainer_args:
callback = ModelCheckpoint(**trainer_args["model_checkpoint_callback"])
del trainer_args["model_checkpoint_callback"]
trainer_args["callbacks"].append(callback)
trainer = pl.Trainer(**trainer_args)
trainer.fit(model, data_module)
def checkpoint_to_module(
input_checkpoint: str,
model_args: Dict[str, Union[float, str, int]],
model_architecture: Dict[str, Union[float, str, int]],
) -> EnzymaticReactionLightningModule:
"""
Transform a checkpoint into a module.
Args:
input_checkpoint: model checkpoint.
model_args: dictionary containing all the parameters for the model configuration.
model_architecture: dictionary containing the information related to the architecture of the model.
Returns:
the lightning module.
"""
return EnzymaticReactionLightningModule.load_from_checkpoint(
checkpoint_path=input_checkpoint,
model_args=model_args,
model_architecture=model_architecture,
)
| [
"torch.cuda.is_available"
] | 1.0 | yvesnana/rxnaamapper | 48fb6a6f45f5ec087f99cedbac34eda2a65e14a3 |
1.9 | # *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
# This file has been modified to enable CPU inference!
import torch
from torch.autograd import Variable
import torch.nn.functional as F
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
n_channels_int = n_channels[0]
in_act = input_a + input_b
t_act = torch.tanh(in_act[:, :n_channels_int, :])
s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
acts = t_act * s_act
return acts
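# (Added note) fused_add_tanh_sigmoid_multiply implements the WaveNet-style gated
# activation unit: the first n_channels of the pre-activation pass through tanh
# (the "filter"), the remaining channels through sigmoid (the "gate"), and the two
# halves are multiplied elementwise.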
class WaveGlowLoss(torch.nn.Module):
def __init__(self, sigma=1.0):
super(WaveGlowLoss, self).__init__()
self.sigma = sigma
def forward(self, model_output):
z, log_s_list, log_det_W_list = model_output
for i, log_s in enumerate(log_s_list):
if i == 0:
log_s_total = torch.sum(log_s)
log_det_W_total = log_det_W_list[i]
else:
log_s_total = log_s_total + torch.sum(log_s)
log_det_W_total += log_det_W_list[i]
loss = torch.sum(z * z) / (2 * self.sigma * self.sigma) - log_s_total - log_det_W_total
return loss / (z.size(0) * z.size(1) * z.size(2))
class Invertible1x1Conv(torch.nn.Module):
"""
The layer outputs both the convolution and the log determinant
of its weight matrix. If reverse=True it does the convolution with the
inverse of the weight matrix.
"""
def __init__(self, c):
super(Invertible1x1Conv, self).__init__()
self.conv = torch.nn.Conv1d(c, c, kernel_size=1, stride=1, padding=0, bias=False)
# Sample a random orthonormal matrix to initialize weights
W = torch.qr(torch.FloatTensor(c, c).normal_())[0]
# Ensure determinant is 1.0 not -1.0
if torch.det(W) < 0:
W[:, 0] = -1 * W[:, 0]
W = W.view(c, c, 1)
self.conv.weight.data = W
def forward(self, z, reverse=False):
# shape
batch_size, group_size, n_of_groups = z.size()
W = self.conv.weight.squeeze()
if reverse:
if not hasattr(self, "W_inverse"):
# Reverse computation
W_inverse = W.float().inverse()
W_inverse = Variable(W_inverse[..., None])
if z.type() == "torch.cuda.HalfTensor":
W_inverse = W_inverse.half()
self.W_inverse = W_inverse
z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
return z
else:
# Forward computation
log_det_W = batch_size * n_of_groups * torch.logdet(W)
z = self.conv(z)
return z, log_det_W
class WN(torch.nn.Module):
"""
This is the WaveNet-like layer for the affine coupling. The primary difference
from WaveNet is that the convolutions need not be causal. There is also no dilation
size reset. The dilation only doubles on each layer.
"""
def __init__(self, n_in_channels, n_mel_channels, n_layers, n_channels, kernel_size):
super(WN, self).__init__()
assert kernel_size % 2 == 1
assert n_channels % 2 == 0
self.n_layers = n_layers
self.n_channels = n_channels
self.in_layers = torch.nn.ModuleList()
self.res_skip_layers = torch.nn.ModuleList()
start = torch.nn.Conv1d(n_in_channels, n_channels, 1)
start = torch.nn.utils.weight_norm(start, name="weight")
self.start = start
# Initializing last layer to 0 makes the affine coupling layers
# do nothing at first. This helps with training stability
end = torch.nn.Conv1d(n_channels, 2 * n_in_channels, 1)
end.weight.data.zero_()
end.bias.data.zero_()
self.end = end
cond_layer = torch.nn.Conv1d(n_mel_channels, 2 * n_channels * n_layers, 1)
self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")
for i in range(n_layers):
dilation = 2 ** i
padding = int((kernel_size * dilation - dilation) / 2)
in_layer = torch.nn.Conv1d(n_channels, 2 * n_channels, kernel_size, dilation=dilation, padding=padding)
in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
self.in_layers.append(in_layer)
# last one is not necessary
if i < n_layers - 1:
res_skip_channels = 2 * n_channels
else:
res_skip_channels = n_channels
res_skip_layer = torch.nn.Conv1d(n_channels, res_skip_channels, 1)
res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
self.res_skip_layers.append(res_skip_layer)
def forward(self, forward_input):
audio, spect = forward_input
audio = self.start(audio)
output = torch.zeros_like(audio)
n_channels_tensor = torch.IntTensor([self.n_channels])
spect = self.cond_layer(spect)
for i in range(self.n_layers):
spect_offset = i * 2 * self.n_channels
acts = fused_add_tanh_sigmoid_multiply(
self.in_layers[i](audio),
spect[:, spect_offset : spect_offset + 2 * self.n_channels, :],
n_channels_tensor,
)
res_skip_acts = self.res_skip_layers[i](acts)
if i < self.n_layers - 1:
audio = audio + res_skip_acts[:, : self.n_channels, :]
output = output + res_skip_acts[:, self.n_channels :, :]
else:
output = output + res_skip_acts
return self.end(output)
class WaveGlow(torch.nn.Module):
def __init__(self, n_mel_channels, n_flows, n_group, n_early_every, n_early_size, WN_config):
super(WaveGlow, self).__init__()
self.upsample = torch.nn.ConvTranspose1d(n_mel_channels, n_mel_channels, 1024, stride=256)
assert n_group % 2 == 0
self.n_flows = n_flows
self.n_group = n_group
self.n_early_every = n_early_every
self.n_early_size = n_early_size
self.WN = torch.nn.ModuleList()
self.convinv = torch.nn.ModuleList()
n_half = int(n_group / 2)
# Set up layers with the right sizes based on how many dimensions
# have been output already
n_remaining_channels = n_group
for k in range(n_flows):
if k % self.n_early_every == 0 and k > 0:
n_half = n_half - int(self.n_early_size / 2)
n_remaining_channels = n_remaining_channels - self.n_early_size
self.convinv.append(Invertible1x1Conv(n_remaining_channels))
self.WN.append(WN(n_half, n_mel_channels * n_group, **WN_config))
self.n_remaining_channels = n_remaining_channels # Useful during inference
def forward(self, forward_input):
"""
forward_input[0] = mel_spectrogram: batch x n_mel_channels x frames
forward_input[1] = audio: batch x time
"""
spect, audio = forward_input
# Upsample spectrogram to size of audio
spect = self.upsample(spect)
assert spect.size(2) >= audio.size(1)
if spect.size(2) > audio.size(1):
spect = spect[:, :, : audio.size(1)]
spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3)
spect = spect.contiguous().view(spect.size(0), spect.size(1), -1).permute(0, 2, 1)
audio = audio.unfold(1, self.n_group, self.n_group).permute(0, 2, 1)
output_audio = []
log_s_list = []
log_det_W_list = []
for k in range(self.n_flows):
if k % self.n_early_every == 0 and k > 0:
output_audio.append(audio[:, : self.n_early_size, :])
audio = audio[:, self.n_early_size :, :]
audio, log_det_W = self.convinv[k](audio)
log_det_W_list.append(log_det_W)
n_half = int(audio.size(1) / 2)
audio_0 = audio[:, :n_half, :]
audio_1 = audio[:, n_half:, :]
output = self.WN[k]((audio_0, spect))
log_s = output[:, n_half:, :]
b = output[:, :n_half, :]
audio_1 = torch.exp(log_s) * audio_1 + b
log_s_list.append(log_s)
audio = torch.cat([audio_0, audio_1], 1)
output_audio.append(audio)
return torch.cat(output_audio, 1), log_s_list, log_det_W_list
def infer(self, spect, sigma=1.0):
spect = self.upsample(spect)
# trim conv artifacts. maybe pad spec to kernel multiple
time_cutoff = self.upsample.kernel_size[0] - self.upsample.stride[0]
spect = spect[:, :, :-time_cutoff]
spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3)
spect = spect.contiguous().view(spect.size(0), spect.size(1), -1).permute(0, 2, 1)
if spect.type() == "torch.cuda.HalfTensor":
audio = torch.cuda.HalfTensor(spect.size(0), self.n_remaining_channels, spect.size(2)).normal_()
else:
if torch.cuda.is_available():
audio = torch.cuda.FloatTensor(spect.size(0), self.n_remaining_channels, spect.size(2)).normal_()
else:
audio = torch.FloatTensor(spect.size(0), self.n_remaining_channels, spect.size(2)).normal_()
audio = torch.autograd.Variable(sigma * audio)
for k in reversed(range(self.n_flows)):
n_half = int(audio.size(1) / 2)
audio_0 = audio[:, :n_half, :]
audio_1 = audio[:, n_half:, :]
output = self.WN[k]((audio_0, spect))
s = output[:, n_half:, :]
b = output[:, :n_half, :]
audio_1 = (audio_1 - b) / torch.exp(s)
audio = torch.cat([audio_0, audio_1], 1)
audio = self.convinv[k](audio, reverse=True)
if k % self.n_early_every == 0 and k > 0:
if spect.type() == "torch.cuda.HalfTensor":
z = torch.cuda.HalfTensor(spect.size(0), self.n_early_size, spect.size(2)).normal_()
else:
if torch.cuda.is_available():
z = torch.cuda.FloatTensor(spect.size(0), self.n_early_size, spect.size(2)).normal_()
else:
z = torch.FloatTensor(spect.size(0), self.n_early_size, spect.size(2)).normal_()
audio = torch.cat((sigma * z, audio), 1)
audio = audio.permute(0, 2, 1).contiguous().view(audio.size(0), -1).data
return audio
@staticmethod
def remove_weightnorm(model):
waveglow = model
for WN in waveglow.WN:
WN.start = torch.nn.utils.remove_weight_norm(WN.start)
WN.in_layers = remove(WN.in_layers)
WN.cond_layer = torch.nn.utils.remove_weight_norm(WN.cond_layer)
WN.res_skip_layers = remove(WN.res_skip_layers)
return waveglow
def remove(conv_list):
new_conv_list = torch.nn.ModuleList()
for old_conv in conv_list:
old_conv = torch.nn.utils.remove_weight_norm(old_conv)
new_conv_list.append(old_conv)
return new_conv_list
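if __name__ == "__main__":
    # Added smoke-test sketch (not part of the original file): the reverse pass of
    # Invertible1x1Conv should undo its forward pass up to floating-point error.
    conv = Invertible1x1Conv(8)
    z = torch.randn(2, 8, 100)
    z_fwd, log_det_W = conv(z)
    z_back = conv(z_fwd, reverse=True)
    assert torch.allclose(z, z_back, atol=1e-4)
    print("Invertible1x1Conv round trip OK, log det W =", float(log_det_W))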
| [
"torch.sigmoid",
"torch.cat",
"torch.nn.ConvTranspose1d",
"torch.nn.ModuleList",
"torch.nn.Conv1d",
"torch.IntTensor",
"torch.autograd.Variable",
"torch.nn.utils.remove_weight_norm",
"torch.FloatTensor",
"torch.det",
"torch.nn.functional.conv1d",
"torch.cuda.is_available",
"torch.logdet",
"torch.zeros_like",
"torch.tanh",
"torch.nn.utils.weight_norm",
"torch.exp",
"torch.sum"
] | 1.9.0 | brooklynbagel/Voice-Cloning-App | 6e0034dc0b4e21f669d28753b5f30b32cca382ad |
1.8 | import warnings
from typing import Any, Dict, Optional, Type, Union
import numpy as np
import torch as th
from mod_gym.gym import spaces
from torch.nn import functional as F
from mod_stable_baselines3.stable_baselines3.common.on_policy_algorithm import OnPolicyAlgorithm
from mod_stable_baselines3.stable_baselines3.common.policies import ActorCriticPolicy
from mod_stable_baselines3.stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
from mod_stable_baselines3.stable_baselines3.common.utils import explained_variance, get_schedule_fn
class PPO(OnPolicyAlgorithm):
"""
Proximal Policy Optimization algorithm (PPO) (clip version)
Paper: https://arxiv.org/abs/1707.06347
Code: This implementation borrows code from OpenAI Spinning Up (https://github.com/openai/spinningup/)
https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail and
and Stable Baselines (PPO2 from https://github.com/hill-a/stable-baselines)
Introduction to PPO: https://spinningup.openai.com/en/latest/algorithms/ppo.html
:param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
:param env: The environment to learn from (if registered in Gym, can be str)
:param learning_rate: The learning rate, it can be a function
of the current progress remaining (from 1 to 0)
:param n_steps: The number of steps to run for each environment per update
(i.e. rollout buffer size is n_steps * n_envs where n_envs is number of environment copies running in parallel)
NOTE: n_steps * n_envs must be greater than 1 (because of the advantage normalization)
See https://github.com/pytorch/pytorch/issues/29372
:param batch_size: Minibatch size
:param n_epochs: Number of epochs when optimizing the surrogate loss
:param gamma: Discount factor
:param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator
:param clip_range: Clipping parameter, it can be a function of the current progress
remaining (from 1 to 0).
:param clip_range_vf: Clipping parameter for the value function,
it can be a function of the current progress remaining (from 1 to 0).
This is a parameter specific to the OpenAI implementation. If None is passed (default),
no clipping will be done on the value function.
IMPORTANT: this clipping depends on the reward scaling.
:param ent_coef: Entropy coefficient for the loss calculation
:param vf_coef: Value function coefficient for the loss calculation
:param max_grad_norm: The maximum value for the gradient clipping
:param use_sde: Whether to use generalized State Dependent Exploration (gSDE)
instead of action noise exploration (default: False)
:param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE
Default: -1 (only sample at the beginning of the rollout)
:param target_kl: Limit the KL divergence between updates,
because the clipping is not enough to prevent large update
see issue #213 (cf https://github.com/hill-a/stable-baselines/issues/213)
By default, there is no limit on the kl div.
:param tensorboard_log: the log location for tensorboard (if None, no logging)
:param create_eval_env: Whether to create a second environment that will be
used for evaluating the agent periodically. (Only available when passing string for the environment)
:param policy_kwargs: additional arguments to be passed to the policy on creation
:param verbose: the verbosity level: 0 no output, 1 info, 2 debug
:param seed: Seed for the pseudo random generators
:param device: Device (cpu, cuda, ...) on which the code should be run.
Setting it to auto, the code will be run on the GPU if possible.
:param _init_setup_model: Whether or not to build the network at the creation of the instance
"""
def __init__(
self,
policy: Union[str, Type[ActorCriticPolicy]],
env: Union[GymEnv, str],
learning_rate: Union[float, Schedule] = 3e-4,
n_steps: int = 2048,
batch_size: Optional[int] = 64,
n_epochs: int = 10,
gamma: float = 0.99,
gae_lambda: float = 0.95,
clip_range: Union[float, Schedule] = 0.2,
clip_range_vf: Union[None, float, Schedule] = None,
ent_coef: float = 0.0,
vf_coef: float = 0.5,
max_grad_norm: float = 0.5,
use_sde: bool = False,
sde_sample_freq: int = -1,
target_kl: Optional[float] = None,
tensorboard_log: Optional[str] = None,
create_eval_env: bool = False,
policy_kwargs: Optional[Dict[str, Any]] = None,
verbose: int = 0,
seed: Optional[int] = None,
device: Union[th.device, str] = "auto",
_init_setup_model: bool = True,
):
super(PPO, self).__init__(
policy,
env,
learning_rate=learning_rate,
n_steps=n_steps,
gamma=gamma,
gae_lambda=gae_lambda,
ent_coef=ent_coef,
vf_coef=vf_coef,
max_grad_norm=max_grad_norm,
use_sde=use_sde,
sde_sample_freq=sde_sample_freq,
tensorboard_log=tensorboard_log,
policy_kwargs=policy_kwargs,
verbose=verbose,
device=device,
create_eval_env=create_eval_env,
seed=seed,
_init_setup_model=False,
supported_action_spaces=(
spaces.Box,
spaces.Discrete,
spaces.MultiDiscrete,
spaces.MultiBinary,
),
)
# Sanity check, otherwise it will lead to noisy gradient and NaN
# because of the advantage normalization
assert (
batch_size > 1
), "`batch_size` must be greater than 1. See https://github.com/DLR-RM/stable-baselines3/issues/440"
if self.env is not None:
# Check that `n_steps * n_envs > 1` to avoid NaN
# when doing advantage normalization
buffer_size = self.env.num_envs * self.n_steps
assert (
buffer_size > 1
), f"`n_steps * n_envs` must be greater than 1. Currently n_steps={self.n_steps} and n_envs={self.env.num_envs}"
# Check that the rollout buffer size is a multiple of the mini-batch size
untruncated_batches = buffer_size // batch_size
if buffer_size % batch_size > 0:
warnings.warn(
f"You have specified a mini-batch size of {batch_size},"
f" but because the `RolloutBuffer` is of size `n_steps * n_envs = {buffer_size}`,"
f" after every {untruncated_batches} untruncated mini-batches,"
f" there will be a truncated mini-batch of size {buffer_size % batch_size}\n"
f"We recommend using a `batch_size` that is a factor of `n_steps * n_envs`.\n"
f"Info: (n_steps={self.n_steps} and n_envs={self.env.num_envs})"
)
self.batch_size = batch_size
self.n_epochs = n_epochs
self.clip_range = clip_range
self.clip_range_vf = clip_range_vf
self.target_kl = target_kl
if _init_setup_model:
self._setup_model()
def _setup_model(self) -> None:
super(PPO, self)._setup_model()
# Initialize schedules for policy/value clipping
self.clip_range = get_schedule_fn(self.clip_range)
if self.clip_range_vf is not None:
if isinstance(self.clip_range_vf, (float, int)):
assert self.clip_range_vf > 0, "`clip_range_vf` must be positive, " "pass `None` to deactivate vf clipping"
self.clip_range_vf = get_schedule_fn(self.clip_range_vf)
def train(self) -> None:
"""
Update policy using the currently gathered rollout buffer.
"""
# Update optimizer learning rate
self._update_learning_rate(self.policy.optimizer)
# Compute current clip range
clip_range = self.clip_range(self._current_progress_remaining)
# Optional: clip range for the value function
if self.clip_range_vf is not None:
clip_range_vf = self.clip_range_vf(self._current_progress_remaining)
entropy_losses = []
pg_losses, value_losses = [], []
clip_fractions = []
continue_training = True
# train for n_epochs epochs
for epoch in range(self.n_epochs):
approx_kl_divs = []
# Do a complete pass on the rollout buffer
for rollout_data in self.rollout_buffer.get(self.batch_size):
actions = rollout_data.actions
if isinstance(self.action_space, spaces.Discrete):
# Convert discrete action from float to long
actions = rollout_data.actions.long().flatten()
# Re-sample the noise matrix because the log_std has changed
# TODO: investigate why there is no issue with the gradient
# if that line is commented (as in SAC)
if self.use_sde:
self.policy.reset_noise(self.batch_size)
values, log_prob, entropy = self.policy.evaluate_actions(rollout_data.observations, actions)
values = values.flatten()
# Normalize advantage
advantages = rollout_data.advantages
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
# ratio between old and new policy, should be one at the first iteration
ratio = th.exp(log_prob - rollout_data.old_log_prob)
# clipped surrogate loss
policy_loss_1 = advantages * ratio
policy_loss_2 = advantages * th.clamp(ratio, 1 - clip_range, 1 + clip_range)
policy_loss = -th.min(policy_loss_1, policy_loss_2).mean()
# Logging
pg_losses.append(policy_loss.item())
clip_fraction = th.mean((th.abs(ratio - 1) > clip_range).float()).item()
clip_fractions.append(clip_fraction)
if self.clip_range_vf is None:
# No clipping
values_pred = values
else:
# Clip the difference between the old and new value
# NOTE: this depends on the reward scaling
values_pred = rollout_data.old_values + th.clamp(
values - rollout_data.old_values, -clip_range_vf, clip_range_vf
)
# Value loss using the TD(gae_lambda) target
value_loss = F.mse_loss(rollout_data.returns, values_pred)
value_losses.append(value_loss.item())
# Entropy loss favor exploration
if entropy is None:
# Approximate entropy when no analytical form
entropy_loss = -th.mean(-log_prob)
else:
entropy_loss = -th.mean(entropy)
entropy_losses.append(entropy_loss.item())
loss = policy_loss + self.ent_coef * entropy_loss + self.vf_coef * value_loss
# Calculate approximate form of reverse KL Divergence for early stopping
# see issue #417: https://github.com/DLR-RM/stable-baselines3/issues/417
# and discussion in PR #419: https://github.com/DLR-RM/stable-baselines3/pull/419
# and Schulman blog: http://joschu.net/blog/kl-approx.html
with th.no_grad():
log_ratio = log_prob - rollout_data.old_log_prob
approx_kl_div = th.mean((th.exp(log_ratio) - 1) - log_ratio).cpu().numpy()
approx_kl_divs.append(approx_kl_div)
if self.target_kl is not None and approx_kl_div > 1.5 * self.target_kl:
continue_training = False
if self.verbose >= 1:
print(f"Early stopping at step {epoch} due to reaching max kl: {approx_kl_div:.2f}")
break
# Optimization step
self.policy.optimizer.zero_grad()
loss.backward()
# Clip grad norm
th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)
self.policy.optimizer.step()
if not continue_training:
break
self._n_updates += self.n_epochs
explained_var = explained_variance(self.rollout_buffer.values.flatten(), self.rollout_buffer.returns.flatten())
# Logs
self.logger.record("train/entropy_loss", np.mean(entropy_losses))
self.logger.record("train/policy_gradient_loss", np.mean(pg_losses))
self.logger.record("train/value_loss", np.mean(value_losses))
self.logger.record("train/approx_kl", np.mean(approx_kl_divs))
self.logger.record("train/clip_fraction", np.mean(clip_fractions))
self.logger.record("train/loss", loss.item())
self.logger.record("train/explained_variance", explained_var)
if hasattr(self.policy, "log_std"):
self.logger.record("train/std", th.exp(self.policy.log_std).mean().item())
self.logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
self.logger.record("train/clip_range", clip_range)
if self.clip_range_vf is not None:
self.logger.record("train/clip_range_vf", clip_range_vf)
def learn(
self,
total_timesteps: int,
callback: MaybeCallback = None,
log_interval: int = 1,
eval_env: Optional[GymEnv] = None,
eval_freq: int = -1,
n_eval_episodes: int = 5,
tb_log_name: str = "PPO",
eval_log_path: Optional[str] = None,
reset_num_timesteps: bool = True,
) -> "PPO":
return super(PPO, self).learn(
total_timesteps=total_timesteps,
callback=callback,
log_interval=log_interval,
eval_env=eval_env,
eval_freq=eval_freq,
n_eval_episodes=n_eval_episodes,
tb_log_name=tb_log_name,
eval_log_path=eval_log_path,
reset_num_timesteps=reset_num_timesteps,
)
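# Added usage sketch: this mirrors the upstream stable-baselines3 API; whether the
# string-based environment construction ("CartPole-v1") works unchanged with the
# mod_gym / mod_stable_baselines3 forks used here is an assumption, so it is left
# commented out.
# if __name__ == "__main__":
#     model = PPO("MlpPolicy", "CartPole-v1", n_steps=256, batch_size=64, verbose=1)
#     model.learn(total_timesteps=10_000)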
| [
"torch.min",
"torch.no_grad",
"torch.clamp",
"torch.nn.functional.mse_loss",
"torch.abs",
"torch.exp",
"torch.mean"
] | 1.8.1 | Practical-Formal-Methods/mod_stable_baselines3 | 08bdb0a529c8ab446ac7973f2a02f832c0c3f454 |
1.8 | # Copyright 2021 cstsunfu. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.nn as nn
import torch
from typing import Dict, List
from . import module_register, module_config_register, Module
from dlk.utils.config import BaseConfig
@module_config_register("biaffine")
class BiAffineConfig(BaseConfig):
"""Config for BiAffine
Config Example:
>>> {
>>> "config": {
>>> "input_size": 256,
>>> "output_size": 2,
>>> "dropout": 0.0, // generally no need for dropout
>>> "bias": true, // use bias or not in biaffine
>>> },
>>> "_name": "biaffine",
>>> }
"""
def __init__(self, config: Dict):
super(BiAffineConfig, self).__init__(config)
config = config['config']
self.input_size = config['input_size']
self.output_size = config['output_size']
self.dropout = float(config['dropout'])
self.bias = config['bias']
self.post_check(config, used=[
"input_size",
"output_size",
"dropout",
"bias",
])
@module_register("biaffine")
class BiAffine(Module):
"""wrap for nn.BiAffine"""
def __init__(self, config: BiAffineConfig):
super(BiAffine, self).__init__()
if config.bias:
self.biaffine = nn.Parameter(torch.randn(config.input_size+1, config.output_size, config.input_size+1))
else:
self.biaffine = nn.Parameter(torch.randn(config.input_size, config.output_size, config.input_size))
self.dropout = nn.Dropout(p=float(config.dropout))
self.config = config
def init_weight(self, method):
"""init the weight of submodules by 'method'
Args:
method: init method
Returns:
None
"""
torch.nn.init.xavier_uniform_(self.biaffine)
def forward(self, input_a: torch.Tensor, input_b: torch.Tensor)->torch.Tensor:
"""do forward on a mini batch
Args:
input_a: a mini batch inputs_a, shape==(batch_size, input_a_len, input_size)
input_b: a mini batch inputs_b, shape==(batch_size, input_b_len, input_size)
Returns:
input_a x biaffine x input_b, shape==(batch_size, input_a_len, input_b_len, output_size)
"""
if self.config.bias:
output = self.dropout(torch.einsum('bmi,ioj,bnj->bmno',
torch.cat((input_a, torch.ones_like(input_a[..., :1])), dim=-1),
self.biaffine,
torch.cat((input_b, torch.ones_like(input_b[..., :1])), dim=-1)
))
else:
output = self.dropout(torch.einsum('bmi,ioj,bnj->bmno',
input_a,
self.biaffine,
input_b,
))
return output
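if __name__ == "__main__":
    # Added shape-check sketch (not part of the original module). It assumes that
    # BaseConfig accepts the dict layout shown in the docstring example above and
    # that the Module base class behaves like nn.Module (i.e. __call__ -> forward).
    cfg = BiAffineConfig({
        "config": {"input_size": 8, "output_size": 2, "dropout": 0.0, "bias": True},
        "_name": "biaffine",
    })
    layer = BiAffine(cfg)
    a = torch.randn(4, 5, 8)   # (batch_size, input_a_len, input_size)
    b = torch.randn(4, 7, 8)   # (batch_size, input_b_len, input_size)
    print(layer(a, b).shape)   # expected: torch.Size([4, 5, 7, 2])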
| [
"torch.ones_like",
"torch.nn.init.xavier_uniform_",
"torch.randn",
"torch.einsum"
] | 1.8.2 | cstsunfu/dlkit | 69e0efd372fa5c0ae5313124d0ba1ef55b535196 |
1.8 | '''
Accelerate demo with fp16 and multi-gpu support.
Single CPU:
python accelerate_demo.py --cpu
16-bit Floating Point:
python accelerate_demo.py --fp16
Model from timm:
python accelerate_demo.py --timm
Single-GPU:
python accelerate_demo.py
Multi-GPU or Multi-CPU:
accelerate config
accelerate launch accelerate_demo.py
'''
import torch
import wandb
import datetime
import timm
import torchvision
import argparse
from torch.optim import SGD
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from ui import progress_bar
from accelerate import Accelerator
def init_wandb():
wandb.login()
config = {
"learning_rate": 0.1,
"epochs": 100,
"batch_size": 128,
"dataset": "cifar10"
}
run = wandb.init(project="accelerate-options-project", entity="upeee", config=config)
return run
def run_experiment(args):
accelerator = Accelerator(fp16=args.fp16, cpu=args.cpu)
_ = init_wandb()
# With timm, no need to manually replace the classifier head.
# Just initialize the model with the correct number of classes.
# However, timm model has a lower accuracy (TODO: why?)
if args.timm:
model = timm.create_model('resnet18', pretrained=False, num_classes=10)
else:
model = torchvision.models.resnet18(pretrained=False, progress=True)
model.fc = torch.nn.Linear(model.fc.in_features, 10)
# wandb will automatically log the model gradients.
wandb.watch(model)
loss = torch.nn.CrossEntropyLoss()
optimizer = SGD(model.parameters(), lr=wandb.config.learning_rate)
scheduler = CosineAnnealingLR(optimizer, T_max=wandb.config.epochs)
x_train = datasets.CIFAR10(root='./data', train=True,
download=True,
transform=transforms.ToTensor())
x_test = datasets.CIFAR10(root='./data',
train=False,
download=True,
transform=transforms.ToTensor())
train_loader = DataLoader(x_train,
batch_size=wandb.config.batch_size,
shuffle=True,
num_workers=2)
test_loader = DataLoader(x_test,
batch_size=wandb.config.batch_size,
shuffle=False,
num_workers=2)
label_human = ["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
table_test = wandb.Table(columns=['Image', "Ground Truth", "Initial Pred Label",])
image, label = iter(test_loader).next()
image = image.to(accelerator.device)
# Accelerate API
model = accelerator.prepare(model)
optimizer = accelerator.prepare(optimizer)
scheduler = accelerator.prepare(scheduler)
train_loader = accelerator.prepare(train_loader)
test_loader = accelerator.prepare(test_loader)
model.eval()
with torch.no_grad():
pred = torch.argmax(model(image), dim=1).cpu().numpy()
for i in range(8):
table_test.add_data(wandb.Image(image[i]),
label_human[label[i]],
label_human[pred[i]])
accelerator.print(label_human[label[i]], "vs. ", label_human[pred[i]])
start_time = datetime.datetime.now()
best_acc = 0
for epoch in range(wandb.config["epochs"]):
train_acc, train_loss = train(epoch, model, optimizer, scheduler, train_loader, loss, accelerator)
test_acc, test_loss = test(model, test_loader, loss, accelerator)
if test_acc > best_acc:
wandb.run.summary["Best accuracy"] = test_acc
best_acc = test_acc
if args.fp16:
accelerator.save(model.state_dict(), "./resnet18_best_acc_fp16.pth")
else:
accelerator.save(model, "./resnet18_best_acc.pth")
wandb.log({
"Train accuracy": train_acc,
"Test accuracy": test_acc,
"Train loss": train_loss,
"Test loss": test_loss,
"Learning rate": optimizer.param_groups[0]['lr']
})
elapsed_time = datetime.datetime.now() - start_time
accelerator.print("Elapsed time: %s" % elapsed_time)
wandb.run.summary["Elapsed train time"] = str(elapsed_time)
wandb.run.summary["Fp16 enabled"] = str(args.fp16)
wandb.run.summary["Using timm"] = str(args.timm)
wandb.run.summary["Using CPU"] = str(args.cpu)
model.eval()
with torch.no_grad():
pred = torch.argmax(model(image), dim=1).cpu().numpy()
final_pred = []
for i in range(8):
final_pred.append(label_human[pred[i]])
accelerator.print(label_human[label[i]], "vs. ", final_pred[i])
table_test.add_column(name="Final Pred Label", data=final_pred)
wandb.log({"Test data": table_test})
wandb.finish()
def train(epoch, model, optimizer, scheduler, train_loader, loss, accelerator):
model.train()
train_loss = 0
correct = 0
train_samples = 0
# sample a batch. compute loss and backpropagate
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
output = model(data)
loss_value = loss(output, target)
accelerator.backward(loss_value)
optimizer.step()
scheduler.step(epoch)
train_loss += loss_value.item()
train_samples += len(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
if batch_idx % 10 == 0:
accuracy = 100. * correct / len(train_loader.dataset)
progress_bar(batch_idx,
len(train_loader),
'Train Epoch: {}, Loss: {:0.2e}, Acc: {:.2f}%'.format(epoch+1,
train_loss/train_samples, accuracy))
train_loss /= len(train_loader.dataset)
accuracy = 100. * correct / len(train_loader.dataset)
return accuracy, train_loss
def test(model, test_loader, loss, accelerator):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
output = model(data)
test_loss += loss(output, target).item()
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
accuracy = 100. * correct / len(test_loader.dataset)
accelerator.print('\nTest Loss: {:.4f}, Acc: {:.2f}%\n'.format(test_loss, accuracy))
return accuracy, test_loss
def main():
parser = argparse.ArgumentParser(description="Simple example of training script.")
parser.add_argument("--timm", action="store_true", help="If passed, build model using timm library.")
parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
# Seems that this is not supported in the Accelerator version installed
parser.add_argument(
"--mixed_precision",
type=str,
default="no",
choices=["no", "fp16", "bf16"],
help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU.",
)
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
args = parser.parse_args()
run_experiment(args)
if __name__ == "__main__":
main() | [
"torch.nn.Linear",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.no_grad",
"torch.utils.data.DataLoader",
"torch.nn.CrossEntropyLoss"
] | 1.8.0 | Cahlil-Togonon/Deep-Learning-Experiments | 501ae610b0a8fb7fb75a53dcfdab71be49274b58 |
1.3 | import platform
import pytest
import torch
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Subset
import tests.base.utils as tutils
from pytorch_lightning import Trainer
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.base import EvalModelTemplate
def test_fit_train_loader_only(tmpdir):
model = EvalModelTemplate()
train_dataloader = model.train_dataloader()
model.train_dataloader = None
model.val_dataloader = None
model.test_dataloader = None
model.validation_step = None
model.validation_epoch_end = None
model.test_step = None
model.test_epoch_end = None
trainer = Trainer(fast_dev_run=True, default_root_dir=tmpdir)
trainer.fit(model, train_dataloader=train_dataloader)
def test_fit_val_loader_only(tmpdir):
model = EvalModelTemplate()
train_dataloader = model.train_dataloader()
val_dataloader = model.val_dataloader()
model.train_dataloader = None
model.val_dataloader = None
model.test_dataloader = None
model.test_step = None
model.test_epoch_end = None
trainer = Trainer(fast_dev_run=True, default_root_dir=tmpdir)
trainer.fit(model, train_dataloader=train_dataloader, val_dataloaders=val_dataloader)
@pytest.mark.parametrize("dataloader_options", [
dict(train_percent_check=-0.1),
dict(train_percent_check=1.1),
dict(val_check_interval=1.1),
dict(val_check_interval=10000),
])
def test_dataloader_config_errors(tmpdir, dataloader_options):
model = EvalModelTemplate()
# fit model
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
**dataloader_options,
)
with pytest.raises(ValueError):
trainer.fit(model)
def test_multiple_val_dataloader(tmpdir):
"""Verify multiple val_dataloader."""
model = EvalModelTemplate()
model.val_dataloader = model.val_dataloader__multiple
model.validation_step = model.validation_step__multiple_dataloaders
# fit model
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
val_percent_check=0.1,
train_percent_check=1.0,
)
result = trainer.fit(model)
# verify training completed
assert result == 1
# verify there are 2 val loaders
assert len(trainer.val_dataloaders) == 2, \
'Multiple val_dataloaders not initiated properly'
# make sure predictions are good for each val set
for dataloader in trainer.val_dataloaders:
tutils.run_prediction(dataloader, trainer.model)
def test_multiple_test_dataloader(tmpdir):
"""Verify multiple test_dataloader."""
model = EvalModelTemplate()
model.test_dataloader = model.test_dataloader__multiple
model.test_step = model.test_step__multiple_dataloaders
# fit model
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
val_percent_check=0.1,
train_percent_check=0.2
)
trainer.fit(model)
trainer.test()
# verify there are 2 test loaders
assert len(trainer.test_dataloaders) == 2, \
'Multiple test_dataloaders not initiated properly'
# make sure predictions are good for each test set
for dataloader in trainer.test_dataloaders:
tutils.run_prediction(dataloader, trainer.model)
# run the test method
trainer.test()
def test_train_dataloader_passed_to_fit(tmpdir):
"""Verify that train dataloader can be passed to fit """
# only train passed to fit
model = EvalModelTemplate()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
val_percent_check=0.1,
train_percent_check=0.2
)
fit_options = dict(train_dataloader=model.dataloader(train=True))
result = trainer.fit(model, **fit_options)
assert result == 1
def test_train_val_dataloaders_passed_to_fit(tmpdir):
""" Verify that train & val dataloader can be passed to fit """
# train, val passed to fit
model = EvalModelTemplate()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
val_percent_check=0.1,
train_percent_check=0.2
)
fit_options = dict(train_dataloader=model.dataloader(train=True),
val_dataloaders=model.dataloader(train=False))
result = trainer.fit(model, **fit_options)
assert result == 1
assert len(trainer.val_dataloaders) == 1, \
f'`val_dataloaders` not initiated properly, got {trainer.val_dataloaders}'
def test_all_dataloaders_passed_to_fit(tmpdir):
"""Verify train, val & test dataloader(s) can be passed to fit and test method"""
model = EvalModelTemplate()
# train, val and test passed to fit
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
val_percent_check=0.1,
train_percent_check=0.2
)
fit_options = dict(train_dataloader=model.dataloader(train=True),
val_dataloaders=model.dataloader(train=False))
test_options = dict(test_dataloaders=model.dataloader(train=False))
result = trainer.fit(model, **fit_options)
trainer.test(**test_options)
assert result == 1
assert len(trainer.val_dataloaders) == 1, \
f'val_dataloaders` not initiated properly, got {trainer.val_dataloaders}'
assert len(trainer.test_dataloaders) == 1, \
f'test_dataloaders` not initiated properly, got {trainer.test_dataloaders}'
def test_multiple_dataloaders_passed_to_fit(tmpdir):
"""Verify that multiple val & test dataloaders can be passed to fit."""
model = EvalModelTemplate()
model.validation_step = model.validation_step__multiple_dataloaders
model.test_step = model.test_step__multiple_dataloaders
# train, multiple val and multiple test passed to fit
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
val_percent_check=0.1,
train_percent_check=0.2
)
fit_options = dict(train_dataloader=model.dataloader(train=True),
val_dataloaders=[model.dataloader(train=False),
model.dataloader(train=False)])
test_options = dict(test_dataloaders=[model.dataloader(train=False),
model.dataloader(train=False)])
trainer.fit(model, **fit_options)
trainer.test(**test_options)
assert len(trainer.val_dataloaders) == 2, \
f'Multiple `val_dataloaders` not initiated properly, got {trainer.val_dataloaders}'
assert len(trainer.test_dataloaders) == 2, \
f'Multiple `test_dataloaders` not initiated properly, got {trainer.test_dataloaders}'
def test_mixing_of_dataloader_options(tmpdir):
"""Verify that dataloaders can be passed to fit"""
model = EvalModelTemplate()
trainer_options = dict(
default_root_dir=tmpdir,
max_epochs=1,
val_percent_check=0.1,
train_percent_check=0.2
)
# fit model
trainer = Trainer(**trainer_options)
results = trainer.fit(model, val_dataloaders=model.dataloader(train=False))
assert results
# fit model
trainer = Trainer(**trainer_options)
results = trainer.fit(model, val_dataloaders=model.dataloader(train=False))
assert results
trainer.test(test_dataloaders=model.dataloader(train=False))
assert len(trainer.val_dataloaders) == 1, \
f'`val_dataloaders` not initiated properly, got {trainer.val_dataloaders}'
assert len(trainer.test_dataloaders) == 1, \
f'`test_dataloaders` not initiated properly, got {trainer.test_dataloaders}'
def test_train_inf_dataloader_error(tmpdir):
"""Test inf train data loader (e.g. IterableDataset)"""
model = EvalModelTemplate()
model.train_dataloader = model.train_dataloader__infinite
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, val_check_interval=0.5)
with pytest.raises(MisconfigurationException, match='infinite DataLoader'):
trainer.fit(model)
def test_val_inf_dataloader_error(tmpdir):
"""Test inf train data loader (e.g. IterableDataset)"""
model = EvalModelTemplate()
model.val_dataloader = model.val_dataloader__infinite
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, val_percent_check=0.5)
with pytest.raises(MisconfigurationException, match='infinite DataLoader'):
trainer.fit(model)
def test_test_inf_dataloader_error(tmpdir):
"""Test inf train data loader (e.g. IterableDataset)"""
model = EvalModelTemplate()
model.test_dataloader = model.test_dataloader__infinite
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, test_percent_check=0.5)
with pytest.raises(MisconfigurationException, match='infinite DataLoader'):
trainer.test(model)
@pytest.mark.parametrize('check_interval', [50, 1.0])
def test_inf_train_dataloader(tmpdir, check_interval):
"""Test inf train data loader (e.g. IterableDataset)"""
model = EvalModelTemplate()
model.train_dataloader = model.train_dataloader__infinite
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
val_check_interval=check_interval
)
result = trainer.fit(model)
# verify training completed
assert result == 1
@pytest.mark.parametrize('check_interval', [1.0])
def test_inf_val_dataloader(tmpdir, check_interval):
"""Test inf val data loader (e.g. IterableDataset)"""
model = EvalModelTemplate()
model.val_dataloader = model.val_dataloader__infinite
# logger file to get meta
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
val_check_interval=check_interval,
)
result = trainer.fit(model)
# verify training completed
assert result == 1
def test_error_on_zero_len_dataloader(tmpdir):
""" Test that error is raised if a zero-length dataloader is defined """
model = EvalModelTemplate()
model.train_dataloader = model.train_dataloader__zero_length
# fit model
with pytest.raises(ValueError):
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
test_percent_check=0.5
)
trainer.fit(model)
@pytest.mark.skipif(platform.system() == 'Windows', reason='Does not apply to Windows platform.')
def test_warning_with_few_workers(tmpdir):
""" Test that error is raised if dataloader with only a few workers is used """
model = EvalModelTemplate()
# logger file to get meta
trainer_options = dict(
default_root_dir=tmpdir,
max_epochs=1,
val_percent_check=0.1,
train_percent_check=0.2
)
fit_options = dict(train_dataloader=model.dataloader(train=True),
val_dataloaders=model.dataloader(train=False))
test_options = dict(test_dataloaders=model.dataloader(train=False))
trainer = Trainer(**trainer_options)
# fit model
with pytest.warns(UserWarning, match='train'):
trainer.fit(model, **fit_options)
with pytest.warns(UserWarning, match='val'):
trainer.fit(model, **fit_options)
with pytest.warns(UserWarning, match='test'):
trainer.test(**test_options)
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason='Test requires multiple GPUs')
def test_dataloader_reinit_for_subclass():
class CustomDataLoader(torch.utils.data.DataLoader):
def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None,
batch_sampler=None, num_workers=0, collate_fn=None,
pin_memory=False, drop_last=False, timeout=0,
worker_init_fn=None, dummy_kwarg=None):
super().__init__(dataset, batch_size, shuffle, sampler, batch_sampler,
num_workers, collate_fn, pin_memory, drop_last, timeout,
worker_init_fn)
self.dummy_kwarg = dummy_kwarg
trainer = Trainer(
gpus=[0, 1],
num_nodes=1,
distributed_backend='ddp',
)
class CustomDummyObj:
sampler = None
result = trainer.auto_add_sampler(CustomDummyObj(), train=True)
assert isinstance(result, CustomDummyObj), "Wrongly reinstantiated data loader"
result = trainer.auto_add_sampler(CustomDataLoader(list(range(1000))), train=True)
assert isinstance(result, torch.utils.data.DataLoader)
assert isinstance(result, CustomDataLoader)
assert hasattr(result, 'dummy_kwarg')
@pytest.mark.skipif(torch.cuda.device_count() < 3, reason='Test requires multiple GPUs')
def test_batch_size_smaller_than_num_gpus():
# we need at least 3 gpus for this test
num_gpus = 3
batch_size = 3
class CurrentTestModel(EvalModelTemplate):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# batch norm doesn't work with batch size 1, we replace it
self.c_d1_bn = torch.nn.ReLU()
def training_step(self, *args, **kwargs):
output = super().training_step(*args, **kwargs)
loss = output['loss']
# we make sure to add some metrics to the output dict,
# this is essential for this test
output['progress_bar'] = {'train_loss': loss}
return output
def train_dataloader(self):
dataloader = super().train_dataloader()
# construct a dataset with a size that is not divisible by num_gpus
# therefore the last batch will have a size < num_gpus
size = num_gpus * batch_size + (num_gpus - 1)
dataset = Subset(dataloader.dataset, range(size))
dataloader = DataLoader(
dataset,
batch_size=self.hparams.batch_size,
drop_last=False,
)
return dataloader
hparams = EvalModelTemplate.get_default_hparams()
hparams.batch_size = batch_size
model = CurrentTestModel(hparams)
trainer = Trainer(
max_epochs=1,
val_percent_check=0,
gpus=num_gpus,
)
# we expect the reduction for the metrics also to happen on the last batch
# where we will get fewer metrics than gpus
result = trainer.fit(model)
assert 1 == result
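# Illustrative note, not part of the original test module: with num_gpus = 3 and
# batch_size = 3 above, the constructed dataset has 3 * 3 + (3 - 1) = 11 samples, so the
# final batch holds only 2 samples -- fewer than the number of GPUs -- which is exactly
# the metric-reduction edge case the last test exercises.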
| [
"torch.nn.ReLU",
"torch.utils.data.dataloader.DataLoader",
"torch.cuda.device_count"
] | 1.3 | binshengliu/pytorch-lightning | 8f6b7a2b4fea9b7bd0b873f5973e6364b3981412 |
0.4 | '''
Script to train the ranker
Should add some sort of image pool someday...?
'''
import time
from options.train_options import TrainOptions
from data import CreateDataLoader
from models import create_model
from util.visualizer import Visualizer
from models import networks
import pdb
import torch
from collections import OrderedDict
def load_chkpt(network, fname):
chkpt = torch.load(fname)
new_chkpt = OrderedDict()
for k, v in chkpt.items():
name = 'module.' + k # add `module`
new_chkpt[name] = v
network.load_state_dict(new_chkpt)
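# Illustrative note, not part of the original script: the 'module.' prefix added above is
# what torch.nn.DataParallel prepends to parameter names, so load_chkpt lets a checkpoint
# saved from an unwrapped model be loaded into the (presumably DataParallel-wrapped)
# networks used below, e.g. 'conv1.weight' becomes 'module.conv1.weight'.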
if __name__ == '__main__':
opt = TrainOptions().parse()
data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
dataset_size = len(data_loader)
model = create_model(opt)
model.setup(opt)
visualizer = Visualizer(opt)
total_steps = 0
'''
chkpt_D = torch.load('checkpoints/streetview_throttled/15_net_D.pth')
chkpt_G = torch.load('checkpoints/streetview_throttled/15_net_G.pth')
new_chkpt_D = OrderedDict()
new_chkpt_G = OrderedDict()
for k, v in chkpt_D.items():
name = 'module.' + k # add `module`
new_chkpt_D[name] = v
for k, v in chkpt_G.items():
name = 'module.' + k # add `module`
new_chkpt_G[name] = v
model.netD.load_state_dict(new_chkpt_D)
model.netG.load_state_dict(new_chkpt_G)
'''
G_model_chkpts = ['checkpoints/street_decaythrottle45_halflr/1_net_G.pth',
'checkpoints/street_decaythrottle45_halflr/2_net_G.pth',
'checkpoints/street_decaythrottle45_halflr/3_net_G.pth',
'checkpoints/street_decaythrottle45_halflr/4_net_G.pth',
'checkpoints/street_decaythrottle45_halflr/5_net_G.pth',
'checkpoints/street_decaythrottle45_halflr/6_net_G.pth',
'checkpoints/street_decaythrottle45_halflr/7_net_G.pth',
'checkpoints/street_decaythrottle45_halflr/8_net_G.pth',
'checkpoints/street_decaythrottle45_halflr/9_net_G.pth',
'checkpoints/street_decaythrottle45_halflr/10_net_G.pth',
'checkpoints/street_decaythrottle45_halflr/11_net_G.pth',
'checkpoints/street_decaythrottle45_halflr/12_net_G.pth',
'checkpoints/street_decaythrottle45_halflr/13_net_G.pth',
'checkpoints/street_decaythrottle45_halflr/14_net_G.pth',
'checkpoints/street_decaythrottle45_halflr/15_net_G.pth',
'checkpoints/street_decaythrottle45_halflr/16_net_G.pth',
'checkpoints/street_decaythrottle45_halflr/17_net_G.pth',
'checkpoints/street_decaythrottle45_halflr/18_net_G.pth',
'checkpoints/street_decaythrottle45_halflr/19_net_G.pth',
'checkpoints/street_decaythrottle45_halflr/20_net_G.pth']
G_networks = []
for i in range(len(G_model_chkpts)):
netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, opt.gpu_ids)
load_chkpt(netG, G_model_chkpts[i])
G_networks.append(netG)
netGs = networks.RandomNetwork(G_networks)
#load_chkpt(model.netG, 'checkpoints/streetview_throttled/15_net_G.pth')
model.netG = netGs
load_chkpt(model.netD, 'checkpoints/street_decaythrottle45_halflr/20_net_D.pth')
for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):
epoch_start_time = time.time()
iter_data_time = time.time()
epoch_iter = 0
for i, data in enumerate(dataset):
iter_start_time = time.time()
if total_steps % opt.print_freq == 0:
t_data = iter_start_time - iter_data_time
visualizer.reset()
total_steps += opt.batch_size
epoch_iter += opt.batch_size
model.set_input(data)
# optimize only discriminator
model.forward()
model.set_requires_grad(model.netD, True)
model.optimizer_D.zero_grad()
model.backward_D()
model.optimizer_D.step()
model.set_requires_grad(model.netD, False)
# need this to prevent logger from complaining
# because it wants to log the G loss, even though
# we aren't updating it
model.backward_G()
if total_steps % opt.display_freq == 0:
save_result = total_steps % opt.update_html_freq == 0
visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)
if total_steps % opt.print_freq == 0:
losses = model.get_current_losses()
t = (time.time() - iter_start_time) / opt.batch_size
visualizer.print_current_losses(epoch, epoch_iter, losses, t, t_data)
if opt.display_id > 0:
visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, opt, losses)
if total_steps % opt.save_latest_freq == 0:
print('saving the latest model (epoch %d, total_steps %d)' %
(epoch, total_steps))
model.save_networks('latest', saveG=False)
iter_data_time = time.time()
if epoch % opt.save_epoch_freq == 0:
print('saving the model at the end of epoch %d, iters %d' %
(epoch, total_steps))
model.save_networks('latest', saveG=False)
model.save_networks(epoch, saveG=False)
print('End of epoch %d / %d \t Time Taken: %d sec' %
(epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))
model.update_learning_rate()
| [
"torch.load"
] | 0.4.0 | dangeng/infiniteGANorama | 92c9cbe0638cf9fcdc05020759772e36aebf788c |
1.5 | #!/usr/bin/env python
"""
Simple implementation for mixup. The loss and onehot functions originate from: https://github.com/moskomule/mixup.pytorch
Hongyi Zhang, Moustapha Cisse, Yann N. Dauphin, David Lopez-Paz: mixup: Beyond Empirical Risk Minimization
https://arxiv.org/abs/1710.09412
"""
__all__ = [ 'mixup_cross_entropy_loss', 'mixup' ]
import numpy as np
import torch
from torch.autograd import Variable
def mixup_cross_entropy_loss(input, target, size_average=True):
"""Origin: https://github.com/moskomule/mixup.pytorch
    PyTorch's built-in cross entropy expects targets to be class indices,
    so this loss is needed when the targets are (mixed) probability distributions.
    Suppose q is the target distribution and p is the predicted distribution:
loss(p, q) = -\sum_i q_i \log p_i
"""
assert input.size() == target.size()
assert isinstance(input, Variable) and isinstance(target, Variable)
input = torch.log(torch.nn.functional.softmax(input, dim=1).clamp(1e-5, 1))
# input = input - torch.log(torch.sum(torch.exp(input), dim=1)).view(-1, 1)
loss = - torch.sum(input * target)
return loss / input.size()[0] if size_average else loss
def onehot(targets, num_classes):
"""Origin: https://github.com/moskomule/mixup.pytorch
convert index tensor into onehot tensor
:param targets: index tensor
:param num_classes: number of classes
"""
assert isinstance(targets, torch.LongTensor)
return torch.zeros(targets.size()[0], num_classes).scatter_(1, targets.view(-1, 1), 1)
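# Illustrative note, not part of the original module:
#   onehot(torch.LongTensor([2, 0]), 3) -> tensor([[0., 0., 1.], [1., 0., 0.]])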
def mixup(inputs, targets, num_classes, alpha=2):
"""Mixup on 1x32x32 mel-spectrograms.
"""
s = inputs.size()[0]
weight = torch.Tensor(np.random.beta(alpha, alpha, s))
index = np.random.permutation(s)
x1, x2 = inputs, inputs[index, :, :, :]
y1, y2 = onehot(targets, num_classes), onehot(targets[index,], num_classes)
weight = weight.view(s, 1, 1, 1)
inputs = weight*x1 + (1-weight)*x2
weight = weight.view(s, 1)
targets = weight*y1 + (1-weight)*y2
return inputs, targets
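# Illustrative usage sketch, not part of the original module: mix a dummy batch of
# eight 1x32x32 inputs over an assumed 10 classes, then score random logits with the
# soft-target loss defined above. Shapes and class count are assumptions made purely
# for demonstration.
if __name__ == "__main__":
    dummy_inputs = torch.randn(8, 1, 32, 32)
    dummy_targets = torch.randint(0, 10, (8,))  # CPU int64 tensor, as onehot() expects
    mixed_inputs, soft_targets = mixup(dummy_inputs, dummy_targets, num_classes=10)
    dummy_logits = torch.randn(8, 10)
    print(mixup_cross_entropy_loss(dummy_logits, soft_targets))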
| [
"torch.nn.functional.softmax",
"torch.sum"
] | 1.5.1 | bozliu/E2E-Keyword-Spotting | 64fc6fe414370a12a22fdf8ca5c8379d2c60b64e |
0.4 | """
A :class:`~allennlp.training.trainer.Trainer` is responsible for training a
:class:`~allennlp.models.model.Model`.
Typically you might create a configuration file specifying the model and
training parameters and then use :mod:`~allennlp.commands.train`
rather than instantiating a ``Trainer`` yourself.
"""
# pylint: disable=too-many-lines
import logging
import os
import shutil
import time
import re
import datetime
import traceback
from typing import Dict, Optional, List, Tuple, Union, Iterable, Any, Set
import torch
import torch.optim.lr_scheduler
from torch.nn.parallel import replicate, parallel_apply
from torch.nn.parallel.scatter_gather import scatter_kwargs, gather
from tensorboardX import SummaryWriter
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.common.util import peak_memory_mb, gpu_memory_mb
from allennlp.common.tqdm import Tqdm
from allennlp.data.instance import Instance
from allennlp.data.iterators.data_iterator import DataIterator
from allennlp.models.model import Model
from allennlp.nn import util
from allennlp.training.learning_rate_schedulers import LearningRateScheduler
from allennlp.training.optimizers import Optimizer
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def is_sparse(tensor):
return tensor.is_sparse
def sparse_clip_norm(parameters, max_norm, norm_type=2) -> float:
"""Clips gradient norm of an iterable of parameters.
The norm is computed over all gradients together, as if they were
concatenated into a single vector. Gradients are modified in-place.
Supports sparse gradients.
Parameters
----------
parameters : ``(Iterable[torch.Tensor])``
An iterable of Tensors that will have gradients normalized.
max_norm : ``float``
The max norm of the gradients.
norm_type : ``float``
The type of the used p-norm. Can be ``'inf'`` for infinity norm.
Returns
-------
Total norm of the parameters (viewed as a single vector).
"""
# pylint: disable=invalid-name,protected-access
parameters = list(filter(lambda p: p.grad is not None, parameters))
max_norm = float(max_norm)
norm_type = float(norm_type)
if norm_type == float('inf'):
total_norm = max(p.grad.data.abs().max() for p in parameters)
else:
total_norm = 0
for p in parameters:
if is_sparse(p.grad):
# need to coalesce the repeated indices before finding norm
grad = p.grad.data.coalesce()
param_norm = grad._values().norm(norm_type)
else:
param_norm = p.grad.data.norm(norm_type)
total_norm += param_norm ** norm_type
total_norm = total_norm ** (1. / norm_type)
clip_coef = max_norm / (total_norm + 1e-6)
if clip_coef < 1:
for p in parameters:
if is_sparse(p.grad):
p.grad.data._values().mul_(clip_coef)
else:
p.grad.data.mul_(clip_coef)
return total_norm
def move_optimizer_to_cuda(optimizer):
"""
Move the optimizer state to GPU, if necessary.
After calling, any parameter specific state in the optimizer
will be located on the same device as the parameter.
"""
for param_group in optimizer.param_groups:
for param in param_group['params']:
if param.is_cuda:
param_state = optimizer.state[param]
for k in param_state.keys():
if isinstance(param_state[k], torch.Tensor):
param_state[k] = param_state[k].cuda(device=param.get_device())
class TensorboardWriter:
"""
Wraps a pair of ``SummaryWriter`` instances but is a no-op if they're ``None``.
Allows Tensorboard logging without always checking for Nones first.
"""
def __init__(self, train_log: SummaryWriter = None, validation_log: SummaryWriter = None) -> None:
self._train_log = train_log
self._validation_log = validation_log
@staticmethod
def _item(value: Any):
if hasattr(value, 'item'):
val = value.item()
else:
val = value
return val
def add_train_scalar(self, name: str, value: float, global_step: int) -> None:
# get the scalar
if self._train_log is not None:
self._train_log.add_scalar(name, self._item(value), global_step)
def add_train_histogram(self, name: str, values: torch.Tensor, global_step: int) -> None:
if self._train_log is not None:
if isinstance(values, torch.Tensor):
values_to_write = values.cpu().data.numpy().flatten()
self._train_log.add_histogram(name, values_to_write, global_step)
def add_validation_scalar(self, name: str, value: float, global_step: int) -> None:
if self._validation_log is not None:
self._validation_log.add_scalar(name, self._item(value), global_step)
def time_to_str(timestamp: int) -> str:
"""
Convert seconds past Epoch to human readable string.
"""
datetimestamp = datetime.datetime.fromtimestamp(timestamp)
return '{:04d}-{:02d}-{:02d}-{:02d}-{:02d}-{:02d}'.format(
datetimestamp.year, datetimestamp.month, datetimestamp.day,
datetimestamp.hour, datetimestamp.minute, datetimestamp.second
)
def str_to_time(time_str: str) -> datetime.datetime:
"""
Convert human readable string to datetime.datetime.
"""
pieces: Any = [int(piece) for piece in time_str.split('-')]
return datetime.datetime(*pieces)
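# Illustrative note, not part of the original file: the two helpers above round-trip the
# 'YYYY-MM-DD-HH-MM-SS' format used in mid-epoch checkpoint names such as
# "model_state_epoch_5.2018-02-02-15-33-42.th" (see find_latest_checkpoint below).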
class Trainer:
def __init__(self,
model: Model,
optimizer: torch.optim.Optimizer,
iterator: DataIterator,
train_dataset: Iterable[Instance],
validation_dataset: Optional[Iterable[Instance]] = None,
patience: Optional[int] = None,
validation_metric: str = "-loss",
validation_iterator: DataIterator = None,
num_epochs: int = 20,
serialization_dir: Optional[str] = None,
num_serialized_models_to_keep: int = 20,
keep_serialized_model_every_num_seconds: int = None,
model_save_interval: float = None,
cuda_device: Union[int, List] = -1,
grad_norm: Optional[float] = None,
grad_clipping: Optional[float] = None,
learning_rate_scheduler: Optional[LearningRateScheduler] = None,
summary_interval: int = 100,
histogram_interval: int = None) -> None:
"""
Parameters
----------
model : ``Model``, required.
An AllenNLP model to be optimized. Pytorch Modules can also be optimized if
their ``forward`` method returns a dictionary with a "loss" key, containing a
scalar tensor representing the loss function to be optimized.
        optimizer : ``torch.optim.Optimizer``, required.
An instance of a Pytorch Optimizer, instantiated with the parameters of the
model to be optimized.
iterator : ``DataIterator``, required.
A method for iterating over a ``Dataset``, yielding padded indexed batches.
train_dataset : ``Dataset``, required.
A ``Dataset`` to train on. The dataset should have already been indexed.
validation_dataset : ``Dataset``, optional, (default = None).
A ``Dataset`` to evaluate on. The dataset should have already been indexed.
patience : Optional[int] > 0, optional (default=None)
Number of epochs to be patient before early stopping: the training is stopped
after ``patience`` epochs with no improvement. If given, it must be ``> 0``.
If None, early stopping is disabled.
        validation_metric : str, optional (default="-loss")
Validation metric to measure for whether to stop training using patience
and whether to serialize an ``is_best`` model each epoch. The metric name
must be prepended with either "+" or "-", which specifies whether the metric
is an increasing or decreasing function.
validation_iterator : ``DataIterator``, optional (default=None)
An iterator to use for the validation set. If ``None``, then
use the training `iterator`.
num_epochs : int, optional (default = 20)
Number of training epochs.
serialization_dir : str, optional (default=None)
Path to directory for saving and loading model files. Models will not be saved if
this parameter is not passed.
num_serialized_models_to_keep : ``int``, optional (default=20)
Number of previous model checkpoints to retain. Default is to keep 20 checkpoints.
A value of None or -1 means all checkpoints will be kept.
keep_serialized_model_every_num_seconds : ``int``, optional (default=None)
If num_serialized_models_to_keep is not None, then occasionally it's useful to
save models at a given interval in addition to the last num_serialized_models_to_keep.
To do so, specify keep_serialized_model_every_num_seconds as the number of seconds
between permanently saved checkpoints. Note that this option is only used if
num_serialized_models_to_keep is not None, otherwise all checkpoints are kept.
model_save_interval : ``float``, optional (default=None)
If provided, then serialize models every ``model_save_interval``
seconds within single epochs. In all cases, models are also saved
at the end of every epoch if ``serialization_dir`` is provided.
        cuda_device : ``Union[int, List[int]]``, optional (default = -1)
            An integer (or list of integers) specifying the CUDA device(s) to use. If -1, the CPU is used.
grad_norm : ``float``, optional, (default = None).
If provided, gradient norms will be rescaled to have a maximum of this value.
grad_clipping : ``float``, optional (default = ``None``).
If provided, gradients will be clipped `during the backward pass` to have an (absolute)
maximum of this value. If you are getting ``NaNs`` in your gradients during training
that are not solved by using ``grad_norm``, you may need this.
learning_rate_scheduler : ``PytorchLRScheduler``, optional, (default = None)
A Pytorch learning rate scheduler. The learning rate will be decayed with respect to
this schedule at the end of each epoch. If you use
:class:`torch.optim.lr_scheduler.ReduceLROnPlateau`, this will use the ``validation_metric``
provided to determine if learning has plateaued. To support updating the learning
rate on every batch, this can optionally implement ``step_batch(batch_num_total)`` which
updates the learning rate given the batch number.
summary_interval: ``int``, optional, (default = 100)
Number of batches between logging scalars to tensorboard
histogram_interval : ``int``, optional, (default = ``None``)
If not None, then log histograms to tensorboard every ``histogram_interval`` batches.
When this parameter is specified, the following additional logging is enabled:
* Histograms of model parameters
* The ratio of parameter update norm to parameter norm
* Histogram of layer activations
We log histograms of the parameters returned by
``model.get_parameters_for_histogram_tensorboard_logging``.
The layer activations are logged for any modules in the ``Model`` that have
the attribute ``should_log_activations`` set to ``True``. Logging
histograms requires a number of GPU-CPU copies during training and is typically
slow, so we recommend logging histograms relatively infrequently.
Note: only Modules that return tensors, tuples of tensors or dicts
with tensors as values currently support activation logging.
"""
self._model = model
self._iterator = iterator
self._validation_iterator = validation_iterator
self._optimizer = optimizer
self._train_data = train_dataset
self._validation_data = validation_dataset
if patience is None: # no early stopping
if validation_dataset:
logger.warning('You provided a validation dataset but patience was set to None, '
'meaning that early stopping is disabled')
elif (not isinstance(patience, int)) or patience <= 0:
raise ConfigurationError('{} is an invalid value for "patience": it must be a positive integer '
'or None (if you want to disable early stopping)'.format(patience))
self._patience = patience
self._num_epochs = num_epochs
self._serialization_dir = serialization_dir
self._num_serialized_models_to_keep = num_serialized_models_to_keep
self._keep_serialized_model_every_num_seconds = keep_serialized_model_every_num_seconds
self._serialized_paths: List[Any] = []
self._last_permanent_saved_checkpoint_time = time.time()
self._model_save_interval = model_save_interval
self._grad_norm = grad_norm
self._grad_clipping = grad_clipping
self._learning_rate_scheduler = learning_rate_scheduler
increase_or_decrease = validation_metric[0]
if increase_or_decrease not in ["+", "-"]:
raise ConfigurationError("Validation metrics must specify whether they should increase "
"or decrease by pre-pending the metric name with a +/-.")
self._validation_metric = validation_metric[1:]
self._validation_metric_decreases = increase_or_decrease == "-"
if not isinstance(cuda_device, int) and not isinstance(cuda_device, list):
raise ConfigurationError("Expected an int or list for cuda_device, got {}".format(cuda_device))
if isinstance(cuda_device, list):
logger.info(f"WARNING: Multiple GPU support is experimental not recommended for use. "
"In some cases it may lead to incorrect results or undefined behavior.")
self._multiple_gpu = True
self._cuda_devices = cuda_device
            # data_parallel will take care of transferring to cuda devices,
# so the iterator keeps data on CPU.
self._iterator_device = -1
else:
self._multiple_gpu = False
self._cuda_devices = [cuda_device]
self._iterator_device = cuda_device
if self._cuda_devices[0] != -1:
self._model = self._model.cuda(self._cuda_devices[0])
self._log_interval = 10 # seconds
self._summary_interval = summary_interval
self._histogram_interval = histogram_interval
self._log_histograms_this_batch = False
# We keep the total batch number as a class variable because it
# is used inside a closure for the hook which logs activations in
# ``_enable_activation_logging``.
self._batch_num_total = 0
self._last_log = 0.0 # time of last logging
if serialization_dir is not None:
train_log = SummaryWriter(os.path.join(serialization_dir, "log", "train"))
validation_log = SummaryWriter(os.path.join(serialization_dir, "log", "validation"))
self._tensorboard = TensorboardWriter(train_log, validation_log)
else:
self._tensorboard = TensorboardWriter()
self._warned_tqdm_ignores_underscores = False
def _enable_gradient_clipping(self) -> None:
if self._grad_clipping is not None:
# Pylint is unable to tell that we're in the case that _grad_clipping is not None...
# pylint: disable=invalid-unary-operand-type
clip_function = lambda grad: grad.clamp(-self._grad_clipping, self._grad_clipping)
for parameter in self._model.parameters():
if parameter.requires_grad:
parameter.register_hook(clip_function)
def _enable_activation_logging(self) -> None:
"""
Log activations to tensorboard
"""
if self._histogram_interval is not None:
# To log activation histograms to the forward pass, we register
# a hook on forward to capture the output tensors.
# This uses a closure on self._log_histograms_this_batch to
# determine whether to send the activations to tensorboard,
# since we don't want them on every call.
for _, module in self._model.named_modules():
if not getattr(module, 'should_log_activations', False):
# skip it
continue
def hook(module_, inputs, outputs):
# pylint: disable=unused-argument,cell-var-from-loop
log_prefix = 'activation_histogram/{0}'.format(module_.__class__)
if self._log_histograms_this_batch:
if isinstance(outputs, torch.Tensor):
log_name = log_prefix
self._tensorboard.add_train_histogram(log_name,
outputs,
self._batch_num_total)
elif isinstance(outputs, (list, tuple)):
for i, output in enumerate(outputs):
log_name = "{0}_{1}".format(log_prefix, i)
self._tensorboard.add_train_histogram(log_name,
output,
self._batch_num_total)
elif isinstance(outputs, dict):
for k, tensor in outputs.items():
log_name = "{0}_{1}".format(log_prefix, k)
self._tensorboard.add_train_histogram(log_name,
tensor,
self._batch_num_total)
else:
# skip it
pass
module.register_forward_hook(hook)
def _rescale_gradients(self) -> Optional[float]:
"""
Performs gradient rescaling. Is a no-op if gradient rescaling is not enabled.
"""
if self._grad_norm:
parameters_to_clip = [p for p in self._model.parameters()
if p.grad is not None]
return sparse_clip_norm(parameters_to_clip, self._grad_norm)
return None
def _data_parallel(self, batch):
"""
Do the forward pass using multiple GPUs. This is a simplification
of torch.nn.parallel.data_parallel to support the allennlp model
interface.
"""
inputs, module_kwargs = scatter_kwargs((), batch, self._cuda_devices, 0)
used_device_ids = self._cuda_devices[:len(inputs)]
replicas = replicate(self._model, used_device_ids)
outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)
# Only the 'loss' is needed.
# a (num_gpu, ) tensor with loss on each GPU
losses = gather([output['loss'].unsqueeze(0) for output in outputs], used_device_ids[0], 0)
return {'loss': losses.mean()}
def _batch_loss(self, batch: torch.Tensor, for_training: bool) -> torch.Tensor:
"""
Does a forward pass on the given batch and returns the ``loss`` value in the result.
If ``for_training`` is `True` also applies regularization penalty.
"""
if self._multiple_gpu:
output_dict = self._data_parallel(batch)
else:
output_dict = self._model(**batch)
try:
loss = output_dict["loss"]
if for_training:
loss += self._model.get_regularization_penalty()
except KeyError:
if for_training:
raise RuntimeError("The model you are trying to optimize does not contain a"
" 'loss' key in the output of model.forward(inputs).")
loss = None
return loss
def _get_metrics(self, total_loss: float, num_batches: int, reset: bool = False) -> Dict[str, float]:
"""
Gets the metrics but sets ``"loss"`` to
the total loss divided by the ``num_batches`` so that
the ``"loss"`` metric is "average loss per batch".
"""
metrics = self._model.get_metrics(reset=reset)
metrics["loss"] = float(total_loss / num_batches) if num_batches > 0 else 0.0
return metrics
def _train_epoch(self, epoch: int) -> Dict[str, float]:
"""
Trains one epoch and returns metrics.
"""
logger.info("Epoch %d/%d", epoch, self._num_epochs - 1)
logger.info(f"Peak CPU memory usage MB: {peak_memory_mb()}")
for gpu, memory in gpu_memory_mb().items():
logger.info(f"GPU {gpu} memory usage MB: {memory}")
train_loss = 0.0
# Set the model to "train" mode.
self._model.train()
# Get tqdm for the training batches
train_generator = self._iterator(self._train_data,
num_epochs=1,
cuda_device=self._iterator_device)
num_training_batches = self._iterator.get_num_batches(self._train_data)
train_generator_tqdm = Tqdm.tqdm(train_generator,
total=num_training_batches)
self._last_log = time.time()
last_save_time = time.time()
batches_this_epoch = 0
if self._batch_num_total is None:
self._batch_num_total = 0
if self._histogram_interval is not None:
histogram_parameters = set(self._model.get_parameters_for_histogram_tensorboard_logging())
logger.info("Training")
for batch in train_generator_tqdm:
batches_this_epoch += 1
self._batch_num_total += 1
batch_num_total = self._batch_num_total
self._log_histograms_this_batch = self._histogram_interval is not None and (
batch_num_total % self._histogram_interval == 0)
self._optimizer.zero_grad()
loss = self._batch_loss(batch, for_training=True)
loss.backward()
train_loss += loss.item()
batch_grad_norm = self._rescale_gradients()
# This does nothing if batch_num_total is None or you are using an
# LRScheduler which doesn't update per batch.
if self._learning_rate_scheduler:
self._learning_rate_scheduler.step_batch(batch_num_total)
if self._log_histograms_this_batch:
# get the magnitude of parameter updates for logging
# We need a copy of current parameters to compute magnitude of updates,
# and copy them to CPU so large models won't go OOM on the GPU.
param_updates = {name: param.detach().cpu().clone()
for name, param in self._model.named_parameters()}
self._optimizer.step()
for name, param in self._model.named_parameters():
param_updates[name].sub_(param.detach().cpu())
update_norm = torch.norm(param_updates[name].view(-1, ))
param_norm = torch.norm(param.view(-1, ))
self._tensorboard.add_train_scalar("gradient_update/" + name,
update_norm / (param_norm + 1e-7),
batch_num_total)
else:
self._optimizer.step()
# Update the description with the latest metrics
metrics = self._get_metrics(train_loss, batches_this_epoch)
description = self._description_from_metrics(metrics)
train_generator_tqdm.set_description(description, refresh=False)
# Log parameter values to Tensorboard
if batch_num_total % self._summary_interval == 0:
self._parameter_and_gradient_statistics_to_tensorboard(batch_num_total, batch_grad_norm)
self._tensorboard.add_train_scalar("loss/loss_train", metrics["loss"], batch_num_total)
self._metrics_to_tensorboard(batch_num_total,
{"epoch_metrics/" + k: v for k, v in metrics.items()})
if self._log_histograms_this_batch:
self._histograms_to_tensorboard(batch_num_total, histogram_parameters)
# Save model if needed.
if self._model_save_interval is not None and (
time.time() - last_save_time > self._model_save_interval
):
last_save_time = time.time()
self._save_checkpoint(
'{0}.{1}'.format(epoch, time_to_str(int(last_save_time))), [], is_best=False
)
return self._get_metrics(train_loss, batches_this_epoch, reset=True)
def _should_stop_early(self, metric_history: List[float]) -> bool:
"""
uses patience and the validation metric to determine if training should stop early
"""
if self._patience and self._patience < len(metric_history):
# Pylint can't figure out that in this branch `self._patience` is an int.
# pylint: disable=invalid-unary-operand-type
            # Is the best score in the past N epochs worse than or equal to the best score overall?
if self._validation_metric_decreases:
return min(metric_history[-self._patience:]) >= min(metric_history[:-self._patience])
else:
return max(metric_history[-self._patience:]) <= max(metric_history[:-self._patience])
return False
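    # Illustrative note, not part of the original file: with patience = 3 and a decreasing
    # metric, a history of [0.9, 0.8, 0.7, 0.71, 0.72, 0.73] triggers early stopping, since
    # the best of the last 3 values (0.71) is no better than the best of the earlier values (0.7).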
def _parameter_and_gradient_statistics_to_tensorboard(self, # pylint: disable=invalid-name
epoch: int,
batch_grad_norm: float) -> None:
"""
Send the mean and std of all parameters and gradients to tensorboard, as well
as logging the average gradient norm.
"""
# Log parameter values to Tensorboard
for name, param in self._model.named_parameters():
self._tensorboard.add_train_scalar("parameter_mean/" + name,
param.data.mean(),
epoch)
self._tensorboard.add_train_scalar("parameter_std/" + name, param.data.std(), epoch)
if param.grad is not None:
if is_sparse(param.grad):
# pylint: disable=protected-access
grad_data = param.grad.data._values()
else:
grad_data = param.grad.data
# skip empty gradients
if torch.prod(torch.tensor(grad_data.shape)).item() > 0: # pylint: disable=not-callable
self._tensorboard.add_train_scalar("gradient_mean/" + name,
grad_data.mean(),
epoch)
self._tensorboard.add_train_scalar("gradient_std/" + name,
grad_data.std(),
epoch)
else:
# no gradient for a parameter with sparse gradients
logger.info("No gradient for %s, skipping tensorboard logging.", name)
# norm of gradients
if batch_grad_norm is not None:
self._tensorboard.add_train_scalar("gradient_norm",
batch_grad_norm,
epoch)
def _histograms_to_tensorboard(self, epoch: int, histogram_parameters: Set[str]) -> None:
"""
Send histograms of parameters to tensorboard.
"""
for name, param in self._model.named_parameters():
if name in histogram_parameters:
self._tensorboard.add_train_histogram("parameter_histogram/" + name,
param,
epoch)
def _metrics_to_tensorboard(self,
epoch: int,
train_metrics: dict,
val_metrics: dict = None) -> None:
"""
Sends all of the train metrics (and validation metrics, if provided) to tensorboard.
"""
metric_names = set(train_metrics.keys())
if val_metrics is not None:
metric_names.update(val_metrics.keys())
val_metrics = val_metrics or {}
for name in metric_names:
train_metric = train_metrics.get(name)
if train_metric is not None:
self._tensorboard.add_train_scalar(name, train_metric, epoch)
val_metric = val_metrics.get(name)
if val_metric is not None:
self._tensorboard.add_validation_scalar(name, val_metric, epoch)
def _metrics_to_console(self, # pylint: disable=no-self-use
train_metrics: dict,
val_metrics: dict = None) -> None:
"""
Logs all of the train metrics (and validation metrics, if provided) to the console.
"""
val_metrics = val_metrics or {}
dual_message_template = "Training %s : %3f Validation %s : %3f "
message_template = "%s %s : %3f "
metric_names = set(train_metrics.keys())
if val_metrics:
metric_names.update(val_metrics.keys())
for name in metric_names:
train_metric = train_metrics.get(name)
val_metric = val_metrics.get(name)
if val_metric is not None and train_metric is not None:
logger.info(dual_message_template, name, train_metric, name, val_metric)
elif val_metric is not None:
logger.info(message_template, "Validation", name, val_metric)
elif train_metric is not None:
logger.info(message_template, "Training", name, train_metric)
def _validation_loss(self) -> Tuple[float, int]:
"""
Computes the validation loss. Returns it and the number of batches.
"""
logger.info("Validating")
self._model.eval()
if self._validation_iterator is not None:
val_iterator = self._validation_iterator
else:
val_iterator = self._iterator
val_generator = val_iterator(self._validation_data,
num_epochs=1,
cuda_device=self._iterator_device)
num_validation_batches = val_iterator.get_num_batches(self._validation_data)
val_generator_tqdm = Tqdm.tqdm(val_generator,
total=num_validation_batches)
batches_this_epoch = 0
val_loss = 0
for batch in val_generator_tqdm:
loss = self._batch_loss(batch, for_training=False)
if loss is not None:
# You shouldn't necessarily have to compute a loss for validation, so we allow for
# `loss` to be None. We need to be careful, though - `batches_this_epoch` is
# currently only used as the divisor for the loss function, so we can safely only
# count those batches for which we actually have a loss. If this variable ever
# gets used for something else, we might need to change things around a bit.
batches_this_epoch += 1
val_loss += loss.detach().cpu().numpy()
# Update the description with the latest metrics
val_metrics = self._get_metrics(val_loss, batches_this_epoch)
description = self._description_from_metrics(val_metrics)
val_generator_tqdm.set_description(description, refresh=False)
return val_loss, batches_this_epoch
def train(self) -> Dict[str, Any]:
"""
Trains the supplied model with the supplied parameters.
"""
try:
epoch_counter, validation_metric_per_epoch = self._restore_checkpoint()
except RuntimeError:
traceback.print_exc()
raise ConfigurationError("Could not recover training from the checkpoint. Did you mean to output to "
"a different serialization directory or delete the existing serialization "
"directory?")
self._enable_gradient_clipping()
self._enable_activation_logging()
logger.info("Beginning training.")
train_metrics: Dict[str, float] = {}
val_metrics: Dict[str, float] = {}
epochs_trained = 0
training_start_time = time.time()
for epoch in range(epoch_counter, self._num_epochs):
epoch_start_time = time.time()
train_metrics = self._train_epoch(epoch)
if self._validation_data is not None:
with torch.no_grad():
# We have a validation set, so compute all the metrics on it.
val_loss, num_batches = self._validation_loss()
val_metrics = self._get_metrics(val_loss, num_batches, reset=True)
# Check validation metric for early stopping
this_epoch_val_metric = val_metrics[self._validation_metric]
# Check validation metric to see if it's the best so far
is_best_so_far = self._is_best_so_far(this_epoch_val_metric, validation_metric_per_epoch)
validation_metric_per_epoch.append(this_epoch_val_metric)
if self._should_stop_early(validation_metric_per_epoch):
logger.info("Ran out of patience. Stopping training.")
break
else:
# No validation set, so just assume it's the best so far.
is_best_so_far = True
val_metrics = {}
this_epoch_val_metric = None
self._save_checkpoint(epoch, validation_metric_per_epoch, is_best=is_best_so_far)
self._metrics_to_tensorboard(epoch, train_metrics, val_metrics=val_metrics)
self._metrics_to_console(train_metrics, val_metrics)
if self._learning_rate_scheduler:
# The LRScheduler API is agnostic to whether your schedule requires a validation metric -
# if it doesn't, the validation metric passed here is ignored.
self._learning_rate_scheduler.step(this_epoch_val_metric, epoch)
epoch_elapsed_time = time.time() - epoch_start_time
logger.info("Epoch duration: %s", time.strftime("%H:%M:%S", time.gmtime(epoch_elapsed_time)))
if epoch < self._num_epochs - 1:
training_elapsed_time = time.time() - training_start_time
estimated_time_remaining = training_elapsed_time * \
((self._num_epochs - epoch_counter) / float(epoch - epoch_counter + 1) - 1)
formatted_time = time.strftime("%H:%M:%S", time.gmtime(estimated_time_remaining))
logger.info("Estimated training time remaining: %s", formatted_time)
epochs_trained += 1
training_elapsed_time = time.time() - training_start_time
metrics = {
"training_duration": time.strftime("%H:%M:%S", time.gmtime(training_elapsed_time)),
"training_start_epoch": epoch_counter,
"training_epochs": epochs_trained
}
for key, value in train_metrics.items():
metrics["training_" + key] = value
for key, value in val_metrics.items():
metrics["validation_" + key] = value
if validation_metric_per_epoch:
# We may not have had validation data, so we need to hide this behind an if.
if self._validation_metric_decreases:
best_validation_metric = min(validation_metric_per_epoch)
else:
best_validation_metric = max(validation_metric_per_epoch)
metrics[f"best_validation_{self._validation_metric}"] = best_validation_metric
metrics['best_epoch'] = [i for i, value in enumerate(validation_metric_per_epoch)
if value == best_validation_metric][-1]
return metrics
def _is_best_so_far(self,
this_epoch_val_metric: float,
validation_metric_per_epoch: List[float]):
if not validation_metric_per_epoch:
return True
elif self._validation_metric_decreases:
return this_epoch_val_metric < min(validation_metric_per_epoch)
else:
return this_epoch_val_metric > max(validation_metric_per_epoch)
def _description_from_metrics(self, metrics: Dict[str, float]) -> str:
if (not self._warned_tqdm_ignores_underscores and
any(metric_name.startswith("_") for metric_name in metrics)):
logger.warning("Metrics with names beginning with \"_\" will "
"not be logged to the tqdm progress bar.")
self._warned_tqdm_ignores_underscores = True
return ', '.join(["%s: %.4f" % (name, value) for name, value in
metrics.items() if not name.startswith("_")]) + " ||"
def _save_checkpoint(self,
epoch: Union[int, str],
val_metric_per_epoch: List[float],
is_best: Optional[bool] = None) -> None:
"""
Saves a checkpoint of the model to self._serialization_dir.
Is a no-op if self._serialization_dir is None.
Parameters
----------
epoch : Union[int, str], required.
The epoch of training. If the checkpoint is saved in the middle
of an epoch, the parameter is a string with the epoch and timestamp.
is_best: bool, optional (default = None)
A flag which causes the model weights at the given epoch to
be copied to a "best.th" file. The value of this flag should
be based on some validation metric computed by your model.
"""
if self._serialization_dir is not None:
model_path = os.path.join(self._serialization_dir, "model_state_epoch_{}.th".format(epoch))
model_state = self._model.state_dict()
torch.save(model_state, model_path)
training_state = {'epoch': epoch,
'val_metric_per_epoch': val_metric_per_epoch,
'optimizer': self._optimizer.state_dict(),
'batch_num_total': self._batch_num_total}
training_path = os.path.join(self._serialization_dir,
"training_state_epoch_{}.th".format(epoch))
torch.save(training_state, training_path)
if is_best:
logger.info("Best validation performance so far. "
"Copying weights to '%s/best.th'.", self._serialization_dir)
shutil.copyfile(model_path, os.path.join(self._serialization_dir, "best.th"))
if self._num_serialized_models_to_keep and self._num_serialized_models_to_keep >= 0:
self._serialized_paths.append([time.time(), model_path, training_path])
if len(self._serialized_paths) > self._num_serialized_models_to_keep:
paths_to_remove = self._serialized_paths.pop(0)
# Check to see if we should keep this checkpoint, if it has been longer
# then self._keep_serialized_model_every_num_seconds since the last
# kept checkpoint.
remove_path = True
if self._keep_serialized_model_every_num_seconds is not None:
save_time = paths_to_remove[0]
time_since_checkpoint_kept = save_time - self._last_permanent_saved_checkpoint_time
if time_since_checkpoint_kept > self._keep_serialized_model_every_num_seconds:
# We want to keep this checkpoint.
remove_path = False
self._last_permanent_saved_checkpoint_time = save_time
if remove_path:
for fname in paths_to_remove[1:]:
os.remove(fname)
def find_latest_checkpoint(self) -> Tuple[str, str]:
"""
Return the location of the latest model and training state files.
If there isn't a valid checkpoint then return None.
"""
have_checkpoint = (self._serialization_dir is not None and
any("model_state_epoch_" in x for x in os.listdir(self._serialization_dir)))
if not have_checkpoint:
return None
serialization_files = os.listdir(self._serialization_dir)
model_checkpoints = [x for x in serialization_files if "model_state_epoch" in x]
# Get the last checkpoint file. Epochs are specified as either an
# int (for end of epoch files) or with epoch and timestamp for
# within epoch checkpoints, e.g. 5.2018-02-02-15-33-42
found_epochs = [
# pylint: disable=anomalous-backslash-in-string
re.search("model_state_epoch_([0-9\.\-]+)\.th", x).group(1)
for x in model_checkpoints
]
int_epochs: Any = []
for epoch in found_epochs:
pieces = epoch.split('.')
if len(pieces) == 1:
# Just a single epoch without timestamp
int_epochs.append([int(pieces[0]), 0])
else:
# has a timestamp
int_epochs.append([int(pieces[0]), pieces[1]])
last_epoch = sorted(int_epochs, reverse=True)[0]
if last_epoch[1] == 0:
epoch_to_load = str(last_epoch[0])
else:
epoch_to_load = '{0}.{1}'.format(last_epoch[0], last_epoch[1])
model_path = os.path.join(self._serialization_dir,
"model_state_epoch_{}.th".format(epoch_to_load))
training_state_path = os.path.join(self._serialization_dir,
"training_state_epoch_{}.th".format(epoch_to_load))
return (model_path, training_state_path)
def _restore_checkpoint(self) -> Tuple[int, List[float]]:
"""
Restores a model from a serialization_dir to the last saved checkpoint.
This includes an epoch count and optimizer state, which is serialized separately
from model parameters. This function should only be used to continue training -
if you wish to load a model for inference/load parts of a model into a new
computation graph, you should use the native Pytorch functions:
`` model.load_state_dict(torch.load("/path/to/model/weights.th"))``
If ``self._serialization_dir`` does not exist or does not contain any checkpointed weights,
this function will do nothing and return 0.
Returns
-------
epoch: int
The epoch at which to resume training, which should be one after the epoch
in the saved training state.
"""
latest_checkpoint = self.find_latest_checkpoint()
if latest_checkpoint is None:
# No checkpoint to restore, start at 0
return 0, []
model_path, training_state_path = latest_checkpoint
# Load the parameters onto CPU, then transfer to GPU.
# This avoids potential OOM on GPU for large models that
# load parameters onto GPU then make a new GPU copy into the parameter
# buffer. The GPU transfer happens implicitly in load_state_dict.
model_state = torch.load(model_path, map_location=util.device_mapping(-1))
training_state = torch.load(training_state_path, map_location=util.device_mapping(-1))
self._model.load_state_dict(model_state)
self._optimizer.load_state_dict(training_state["optimizer"])
move_optimizer_to_cuda(self._optimizer)
        # We didn't always save `validation_metric_per_epoch`, so we can't assume
# that it's part of the trainer state. If it's not there, an empty list is all
# we can do.
if "val_metric_per_epoch" not in training_state:
logger.warning("trainer state `val_metric_per_epoch` not found, using empty list")
val_metric_per_epoch: List[float] = []
else:
val_metric_per_epoch = training_state["val_metric_per_epoch"]
if isinstance(training_state["epoch"], int):
epoch_to_return = training_state["epoch"] + 1
else:
epoch_to_return = int(training_state["epoch"].split('.')[0]) + 1
# For older checkpoints with batch_num_total missing, default to old behavior where
# it is unchanged.
batch_num_total = training_state.get('batch_num_total')
if batch_num_total is not None:
self._batch_num_total = batch_num_total
return epoch_to_return, val_metric_per_epoch
# Requires custom from_params.
@classmethod
def from_params(cls,
model: Model,
serialization_dir: str,
iterator: DataIterator,
train_data: Iterable[Instance],
validation_data: Optional[Iterable[Instance]],
params: Params,
validation_iterator: DataIterator = None) -> 'Trainer':
patience = params.pop_int("patience", None)
validation_metric = params.pop("validation_metric", "-loss")
num_epochs = params.pop_int("num_epochs", 20)
cuda_device = params.pop_int("cuda_device", -1)
grad_norm = params.pop_float("grad_norm", None)
grad_clipping = params.pop_float("grad_clipping", None)
lr_scheduler_params = params.pop("learning_rate_scheduler", None)
if cuda_device >= 0:
model = model.cuda(cuda_device)
parameters = [[n, p] for n, p in model.named_parameters() if p.requires_grad]
optimizer = Optimizer.from_params(parameters, params.pop("optimizer"))
if lr_scheduler_params:
scheduler = LearningRateScheduler.from_params(optimizer, lr_scheduler_params)
else:
scheduler = None
num_serialized_models_to_keep = params.pop_int("num_serialized_models_to_keep", 20)
keep_serialized_model_every_num_seconds = params.pop_int(
"keep_serialized_model_every_num_seconds", None)
model_save_interval = params.pop_float("model_save_interval", None)
summary_interval = params.pop_int("summary_interval", 100)
histogram_interval = params.pop_int("histogram_interval", None)
params.assert_empty(cls.__name__)
return Trainer(model, optimizer, iterator,
train_data, validation_data,
patience=patience,
validation_metric=validation_metric,
validation_iterator=validation_iterator,
num_epochs=num_epochs,
serialization_dir=serialization_dir,
cuda_device=cuda_device,
grad_norm=grad_norm,
grad_clipping=grad_clipping,
learning_rate_scheduler=scheduler,
num_serialized_models_to_keep=num_serialized_models_to_keep,
keep_serialized_model_every_num_seconds=keep_serialized_model_every_num_seconds,
model_save_interval=model_save_interval,
summary_interval=summary_interval,
histogram_interval=histogram_interval)
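# Illustrative sketch, not part of the original module: exercise sparse_clip_norm on the
# gradients of a tiny throwaway linear layer (layer sizes and max_norm are arbitrary
# assumptions; running this still requires AllenNLP's own imports above to resolve).
if __name__ == "__main__":
    _layer = torch.nn.Linear(4, 2)
    _layer(torch.randn(3, 4)).sum().backward()
    print(sparse_clip_norm(_layer.parameters(), max_norm=1.0))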
| [
"torch.nn.parallel.replicate",
"torch.no_grad",
"torch.save",
"torch.nn.parallel.scatter_gather.scatter_kwargs",
"torch.tensor",
"torch.nn.parallel.parallel_apply"
] | 0.4.0 | albert-dot-ai/allennlp | 580dc8b0e2c6491d4d75b54c3b15b34b462e0c67 |
1.9 | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import math
from typing import List, Tuple, Optional
import fastmri
import torch
import torch.nn as nn
import torch.nn.functional as F
from fastmri.data import transforms
from .unet import Unet
class NormUnet(nn.Module):
"""
Normalized U-Net model.
This is the same as a regular U-Net, but with normalization applied to the
input before the U-Net. This keeps the values more numerically stable
during training.
"""
def __init__(
self,
chans: int,
num_pools: int,
in_chans: int = 2,
out_chans: int = 2,
drop_prob: float = 0.0,
):
"""
Args:
chans: Number of output channels of the first convolution layer.
num_pools: Number of down-sampling and up-sampling layers.
in_chans: Number of channels in the input to the U-Net model.
out_chans: Number of channels in the output to the U-Net model.
drop_prob: Dropout probability.
"""
super().__init__()
self.unet = Unet(
in_chans=in_chans,
out_chans=out_chans,
chans=chans,
num_pool_layers=num_pools,
drop_prob=drop_prob,
)
def complex_to_chan_dim(self, x: torch.Tensor) -> torch.Tensor:
b, c, h, w, two = x.shape
assert two == 2
return x.permute(0, 4, 1, 2, 3).reshape(b, 2 * c, h, w)
def chan_complex_to_last_dim(self, x: torch.Tensor) -> torch.Tensor:
b, c2, h, w = x.shape
assert c2 % 2 == 0
c = c2 // 2
return x.view(b, 2, c, h, w).permute(0, 2, 3, 4, 1).contiguous()
def norm(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
# group norm
b, c, h, w = x.shape
x = x.view(b, 2, c // 2 * h * w)
mean = x.mean(dim=2).view(b, 2, 1, 1)
std = x.std(dim=2).view(b, 2, 1, 1)
x = x.view(b, c, h, w)
return (x - mean) / std, mean, std
def unnorm(
self, x: torch.Tensor, mean: torch.Tensor, std: torch.Tensor
) -> torch.Tensor:
return x * std + mean
def pad(
self, x: torch.Tensor
) -> Tuple[torch.Tensor, Tuple[List[int], List[int], int, int]]:
_, _, h, w = x.shape
w_mult = ((w - 1) | 15) + 1
h_mult = ((h - 1) | 15) + 1
w_pad = [math.floor((w_mult - w) / 2), math.ceil((w_mult - w) / 2)]
h_pad = [math.floor((h_mult - h) / 2), math.ceil((h_mult - h) / 2)]
# TODO: fix this type when PyTorch fixes theirs
# the documentation lies - this actually takes a list
# https://github.com/pytorch/pytorch/blob/master/torch/nn/functional.py#L3457
# https://github.com/pytorch/pytorch/pull/16949
x = F.pad(x, w_pad + h_pad)
return x, (h_pad, w_pad, h_mult, w_mult)
def unpad(
self,
x: torch.Tensor,
h_pad: List[int],
w_pad: List[int],
h_mult: int,
w_mult: int,
) -> torch.Tensor:
return x[..., h_pad[0] : h_mult - h_pad[1], w_pad[0] : w_mult - w_pad[1]]
def forward(self, x: torch.Tensor) -> torch.Tensor:
if not x.shape[-1] == 2:
raise ValueError("Last dimension must be 2 for complex.")
# get shapes for unet and normalize
x = self.complex_to_chan_dim(x)
x, mean, std = self.norm(x)
x, pad_sizes = self.pad(x)
x = self.unet(x)
# get shapes back and unnormalize
x = self.unpad(x, *pad_sizes)
x = self.unnorm(x, mean, std)
x = self.chan_complex_to_last_dim(x)
return x
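# Illustrative note, not part of the original file: the ``((w - 1) | 15) + 1`` trick in
# NormUnet.pad rounds each spatial size up to the next multiple of 16 (e.g. 100 -> 112,
# 128 -> 128), so the padded image can pass through up to four 2x down-samplings cleanly.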
class SensitivityModel(nn.Module):
"""
Model for learning sensitivity estimation from k-space data.
This model applies an IFFT to multichannel k-space data and then a U-Net
to the coil images to estimate coil sensitivities. It can be used with the
end-to-end variational network.
"""
def __init__(
self,
chans: int,
num_pools: int,
in_chans: int = 2,
out_chans: int = 2,
drop_prob: float = 0.0,
mask_center: bool = True,
):
"""
Args:
chans: Number of output channels of the first convolution layer.
num_pools: Number of down-sampling and up-sampling layers.
in_chans: Number of channels in the input to the U-Net model.
out_chans: Number of channels in the output to the U-Net model.
drop_prob: Dropout probability.
mask_center: Whether to mask center of k-space for sensitivity map
calculation.
"""
super().__init__()
self.mask_center = mask_center
self.norm_unet = NormUnet(
chans,
num_pools,
in_chans=in_chans,
out_chans=out_chans,
drop_prob=drop_prob,
)
def chans_to_batch_dim(self, x: torch.Tensor) -> Tuple[torch.Tensor, int]:
b, c, h, w, comp = x.shape
return x.view(b * c, 1, h, w, comp), b
def batch_chans_to_chan_dim(self, x: torch.Tensor, batch_size: int) -> torch.Tensor:
bc, _, h, w, comp = x.shape
c = bc // batch_size
return x.view(batch_size, c, h, w, comp)
def divide_root_sum_of_squares(self, x: torch.Tensor) -> torch.Tensor:
return x / fastmri.rss_complex(x, dim=1).unsqueeze(-1).unsqueeze(1)
def get_pad_and_num_low_freqs(
self, mask: torch.Tensor, num_low_frequencies: Optional[int] = None
) -> Tuple[torch.Tensor, torch.Tensor]:
if num_low_frequencies is None:
# get low frequency line locations and mask them out
squeezed_mask = mask[:, 0, 0, :, 0].to(torch.int8)
cent = squeezed_mask.shape[1] // 2
            # argmin over the 0/1 mask finds the first zero, i.e. counts the sampled columns adjacent to the center
left = torch.argmin(squeezed_mask[:, :cent].flip(1), dim=1)
right = torch.argmin(squeezed_mask[:, cent:], dim=1)
num_low_frequencies_tensor = torch.max(
2 * torch.min(left, right), torch.ones_like(left)
) # force a symmetric center unless 1
else:
num_low_frequencies_tensor = num_low_frequencies * torch.ones(
mask.shape[0], dtype=mask.dtype, device=mask.device
)
pad = (mask.shape[-2] - num_low_frequencies_tensor + 1) // 2
return pad, num_low_frequencies_tensor
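    # Illustrative note, not part of the original file: for a 320-wide mask whose 26 central
    # columns are sampled, left = right = 13, so num_low_frequencies = 2 * 13 = 26 and
    # pad = (320 - 26 + 1) // 2 = 147, i.e. columns [147, 173) are kept for the sensitivity
    # estimate.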
def forward(
self,
masked_kspace: torch.Tensor,
mask: torch.Tensor,
num_low_frequencies: Optional[int] = None,
) -> torch.Tensor:
if self.mask_center:
pad, num_low_freqs = self.get_pad_and_num_low_freqs(
mask, num_low_frequencies
)
masked_kspace = transforms.batched_mask_center(
masked_kspace, pad, pad + num_low_freqs
)
# convert to image space
images, batches = self.chans_to_batch_dim(fastmri.ifft2c(masked_kspace))
# estimate sensitivities
return self.divide_root_sum_of_squares(
self.batch_chans_to_chan_dim(self.norm_unet(images), batches)
)
class VarNet(nn.Module):
"""
A full variational network model.
This model applies a combination of soft data consistency with a U-Net
regularizer. To use non-U-Net regularizers, use VarNetBlock.
"""
def __init__(
self,
num_cascades: int = 12,
sens_chans: int = 8,
sens_pools: int = 4,
chans: int = 18,
pools: int = 4,
mask_center: bool = True,
):
"""
Args:
num_cascades: Number of cascades (i.e., layers) for variational
network.
sens_chans: Number of channels for sensitivity map U-Net.
            sens_pools: Number of downsampling and upsampling layers for
sensitivity map U-Net.
chans: Number of channels for cascade U-Net.
pools: Number of downsampling and upsampling layers for cascade
U-Net.
mask_center: Whether to mask center of k-space for sensitivity map
calculation.
"""
super().__init__()
self.sens_net = SensitivityModel(
chans=sens_chans,
num_pools=sens_pools,
mask_center=mask_center,
)
self.cascades = nn.ModuleList(
[VarNetBlock(NormUnet(chans, pools)) for _ in range(num_cascades)]
)
def forward(
self,
masked_kspace: torch.Tensor,
mask: torch.Tensor,
num_low_frequencies: Optional[int] = None,
) -> torch.Tensor:
sens_maps = self.sens_net(masked_kspace, mask, num_low_frequencies)
kspace_pred = masked_kspace.clone()
for cascade in self.cascades:
kspace_pred = cascade(kspace_pred, masked_kspace, mask, sens_maps)
return fastmri.rss(fastmri.complex_abs(fastmri.ifft2c(kspace_pred)), dim=1)
class VarNetBlock(nn.Module):
"""
Model block for end-to-end variational network.
This model applies a combination of soft data consistency with the input
model as a regularizer. A series of these blocks can be stacked to form
the full variational network.
"""
def __init__(self, model: nn.Module):
"""
Args:
model: Module for "regularization" component of variational
network.
"""
super().__init__()
self.model = model
self.dc_weight = nn.Parameter(torch.ones(1))
def sens_expand(self, x: torch.Tensor, sens_maps: torch.Tensor) -> torch.Tensor:
return fastmri.fft2c(fastmri.complex_mul(x, sens_maps))
def sens_reduce(self, x: torch.Tensor, sens_maps: torch.Tensor) -> torch.Tensor:
x = fastmri.ifft2c(x)
return fastmri.complex_mul(x, fastmri.complex_conj(sens_maps)).sum(
dim=1, keepdim=True
)
def forward(
self,
current_kspace: torch.Tensor,
ref_kspace: torch.Tensor,
mask: torch.Tensor,
sens_maps: torch.Tensor,
) -> torch.Tensor:
zero = torch.zeros(1, 1, 1, 1, 1).to(current_kspace)
soft_dc = torch.where(mask, current_kspace - ref_kspace, zero) * self.dc_weight
model_term = self.sens_expand(
self.model(self.sens_reduce(current_kspace, sens_maps)), sens_maps
)
return current_kspace - soft_dc - model_term
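# Illustrative shape check, not part of the original module: run NormUnet on an arbitrary
# toy batch of two single-coil 64x64 complex images. Because of the relative import above,
# this only runs as a module (e.g. ``python -m fastmri.models.varnet``) with the fastmri
# package installed; the sizes are assumptions for demonstration.
if __name__ == "__main__":
    toy = torch.randn(2, 1, 64, 64, 2)  # (batch, coils, height, width, real/imag)
    out = NormUnet(chans=8, num_pools=2)(toy)
    print(out.shape)  # expected: torch.Size([2, 1, 64, 64, 2])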
| [
"torch.zeros",
"torch.min",
"torch.argmin",
"torch.ones",
"torch.ones_like",
"torch.nn.functional.pad",
"torch.where"
] | 1.9.0 | vigsivan/fastMRI | 0f6c4c0176ff74bf2761d20ec62facb01c9038f8 |
1.13 | import csv
import decimal
import os
import threading
import time
from typing import List
import torch
import torch.distributed as dist
import torch.distributed.rpc as rpc
import torch.multiprocessing as mp
from torch.distributed import rpc
from .trpc_server import TRPCCOMMServicer
from ..base_com_manager import BaseCommunicationManager
from ..message import Message
from ..observer import Observer
import logging
lock = threading.Lock()
WORKER = "worker{}"
class TRPCCommManager(BaseCommunicationManager):
def __init__(
self,
trpc_master_config_path,
process_id=0,
world_size=0,
):
logging.info("using TRPC backend")
with open(trpc_master_config_path, newline="") as csv_file:
csv_reader = csv.reader(csv_file)
# skip header line
next(csv_reader)
master_address, master_port = next(csv_reader)
self.master_address = master_address
self.master_port = master_port
self.process_id = process_id
self.world_size = world_size
self._observers: List[Observer] = []
if process_id == 0:
self.node_type = "server"
else:
self.node_type = "client"
print(f"Worker rank {process_id} initializing RPC")
self.trpc_servicer = TRPCCOMMServicer(
master_address, master_port, self.world_size, process_id
)
logging.info(os.getcwd())
os.environ["MASTER_ADDR"] = self.master_address
os.environ["MASTER_PORT"] = self.master_port
self._init_torch_rpc_tp(
master_address, master_port, process_id, self.world_size
)
self.is_running = True
print("server started. master address: " + str(master_address))
def _init_torch_rpc_pg(
self,
master_addr,
master_port,
worker_idx,
worker_num,
):
# https://github.com/pytorch/pytorch/issues/55615
# [BC-Breaking][RFC] Retire ProcessGroup Backend for RPC #55615
str_init_method = "tcp://" + str(master_addr) + ":" + str(master_port)
logging.info("str_init_method = {}".format(str_init_method))
options = rpc.ProcessGroupRpcBackendOptions(
num_send_recv_threads=4, init_method=str_init_method, rpc_timeout=60.0
)
rpc.init_rpc(
WORKER.format(worker_idx),
backend=dist.rpc.BackendType.PROCESS_GROUP,
rank=worker_idx,
world_size=worker_num,
rpc_backend_options=options,
)
# torch.distributed.rpc.init_rpc('worker', rank=self.global_rank, world_size=self.world_size)
logging.info("_init_rpc_with_process_group finished.")
def _init_torch_rpc_tp(
self,
master_addr,
master_port,
worker_idx,
worker_num,
):
# https://github.com/pytorch/pytorch/issues/55615
# [BC-Breaking][RFC] Retire ProcessGroup Backend for RPC #55615
str_init_method = "tcp://" + str(master_addr) + ":10000"
logging.info("str_init_method = {}".format(str_init_method))
options = rpc.TensorPipeRpcBackendOptions(
num_worker_threads=16,
rpc_timeout=1800,
init_method=str_init_method,
_transports=["uv"],
)
rpc.init_rpc(
WORKER.format(worker_idx),
backend=rpc.BackendType.TENSORPIPE,
rank=worker_idx,
world_size=worker_num,
rpc_backend_options=options,
)
logging.info("_init_torch_rpc_tp finished.")
def send_message(self, msg: Message):
receiver_id = msg.get_receiver_id()
logging.info("sending message to {}".format(receiver_id))
# Should I wait?
rpc.rpc_sync(
WORKER.format(receiver_id),
TRPCCOMMServicer.sendMessage,
args=(self.process_id, msg),
)
logging.debug("sent")
def add_observer(self, observer: Observer):
self._observers.append(observer)
def remove_observer(self, observer: Observer):
self._observers.remove(observer)
def handle_receive_message(self):
thread = threading.Thread(target=self.message_handling_subroutine)
thread.start()
def message_handling_subroutine(self):
while self.is_running:
if self.trpc_servicer.message_q.qsize() > 0:
lock.acquire()
msg = self.trpc_servicer.message_q.get()
self.notify(msg)
lock.release()
return
def stop_receive_message(self):
rpc.shutdown()
self.is_running = False
def notify(self, message: Message):
msg_type = message.get_type()
for observer in self._observers:
observer.receive_message(msg_type, message)
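# A minimal usage sketch (illustrative only; the config path and ids are
# assumptions mirroring run_worker below):
#
#   comm = TRPCCommManager("./trpc_master_config.csv", process_id=0, world_size=2)
#   comm.add_observer(my_observer)      # my_observer implements the Observer interface
#   comm.handle_receive_message()       # starts the background message-handling thread
#   comm.send_message(Message(type="test", sender_id=0, receiver_id="1"))
#   comm.stop_receive_message()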
def run_worker(rank, world_size):
r"""
A wrapper function that initializes RPC, calls the function, and shuts down
RPC.
"""
if rank == 1:
com_manager_client = TRPCCommManager(
"./trpc_master_config.csv", rank, world_size
)
start = time.time()
tensor = torch.ones(1000, 1000)
message = Message(type="test", sender_id=rank, receiver_id="1")
message.add_params("THE_TENSOR", tensor)
TRPCCOMMServicer.sendMessage("worker0", message)
message_values = []
message = Message(type="test", sender_id=rank, receiver_id="1")
message2 = Message(type="test", sender_id=rank, receiver_id="1")
message.add_params("THE_TENSOR", tensor)
for i in range(100):
print("###############################")
print("Measuring for Single Message")
for size in [100, 1000, 10000]:
# for size in [100, 1000]:
print(f"======= size = {size} =====")
tensor = torch.ones(size, size)
start = time.time()
TRPCCOMMServicer.sendMessageTest1("worker0", message)
end = time.time()
duration = end - start
message_values.append(duration)
# print(f"Message tensor size={size} duration={str(duration)}", flush=True)
print("###############################")
print("Measuring for Message with separate Tensor")
sinle_tensor_values = []
start = time.time()
for size in [100, 1000, 10000]:
# for size in [100, 1000]:
print(f"======= size = {size} =====")
tensor = torch.ones(size, size)
# message = Message(type="test", sender_id=rank, receiver_id="1")
# message.add_params("THE_TENSOR", tensor)
start = time.time()
TRPCCOMMServicer.sendMessageTest2(
"worker0", message2.get_params(), tensor
)
end = time.time()
duration = end - start
# print(f"Single tensor size={size} duration={str(duration)}", flush=True)
sinle_tensor_values.append(duration)
print(
"mean message: "
+ str(decimal.Decimal(sum(message_values) / len(message_values)))
)
print(
"mean single tensor: "
+ str(decimal.Decimal(sum(sinle_tensor_values) / len(sinle_tensor_values)))
)
# ret = rpc.rpc_sync("worker1", TRPCCOMMServicer., args=(torch.ones(2), torch.ones(2)))
else:
# parameter server does nothing
com_manager_client = TRPCCommManager(
"./trpc_master_config.csv", rank, world_size
)
rpc.shutdown()
if __name__ == "__main__":
world_size = 2
# run_worker(0,1)
mp.spawn(run_worker, args=(world_size,), nprocs=world_size, join=True)
| [
"torch.distributed.rpc.TensorPipeRpcBackendOptions",
"torch.multiprocessing.spawn",
"torch.distributed.rpc.ProcessGroupRpcBackendOptions",
"torch.ones",
"torch.distributed.rpc.shutdown"
] | 1.13.1 | eliaskousk/FedML | e30d5dd3cc84c8a369c828a6f6ef097b3cf67b1a |
1.3 | # General structure from https://github.com/pytorch/examples/blob/master/mnist/main.py
from __future__ import print_function
import argparse
import os
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import CosineAnnealingLR
import torch.autograd as autograd
args = None
class GetSubnet(autograd.Function):
@staticmethod
def forward(ctx, scores, k):
# Get the supermask by sorting the scores and using the top k%
out = scores.clone()
_, idx = scores.flatten().sort()
j = int((1 - k) * scores.numel())
# flat_out and out access the same memory.
flat_out = out.flatten()
flat_out[idx[:j]] = 0
flat_out[idx[j:]] = 1
return out
@staticmethod
def backward(ctx, g):
# send the gradient g straight-through on the backward pass.
return g, None
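# A minimal illustrative sketch of GetSubnet (the values below are an example
# only, not part of the training flow): the forward pass keeps the top-k
# fraction of scores as a binary mask, and the backward pass returns the
# incoming gradient unchanged (straight-through estimator).
#
#   scores = torch.tensor([0.1, 0.9, 0.3, 0.7])
#   GetSubnet.apply(scores, 0.5)   # -> tensor([0., 1., 0., 1.])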
class SupermaskConv(nn.Conv2d):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# initialize the scores
self.scores = nn.Parameter(torch.Tensor(self.weight.size()))
nn.init.kaiming_uniform_(self.scores, a=math.sqrt(5))
# NOTE: initialize the weights like this.
nn.init.kaiming_normal_(self.weight, mode="fan_in", nonlinearity="relu")
# NOTE: turn the gradient on the weights off
self.weight.requires_grad = False
def forward(self, x):
subnet = GetSubnet.apply(self.scores.abs(), args.sparsity)
w = self.weight * subnet
x = F.conv2d(
x, w, self.bias, self.stride, self.padding, self.dilation, self.groups
)
return x
class SupermaskLinear(nn.Linear):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# initialize the scores
self.scores = nn.Parameter(torch.Tensor(self.weight.size()))
nn.init.kaiming_uniform_(self.scores, a=math.sqrt(5))
# NOTE: initialize the weights like this.
nn.init.kaiming_normal_(self.weight, mode="fan_in", nonlinearity="relu")
# NOTE: turn the gradient on the weights off
self.weight.requires_grad = False
def forward(self, x):
subnet = GetSubnet.apply(self.scores.abs(), args.sparsity)
w = self.weight * subnet
        return F.linear(x, w, self.bias)
# NOTE: not used here, but we use NON-AFFINE Normalization!
# So there are no learned parameters for the normalization layer.
class NonAffineBatchNorm(nn.BatchNorm2d):
def __init__(self, dim):
super(NonAffineBatchNorm, self).__init__(dim, affine=False)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = SupermaskConv(1, 32, 3, 1, bias=False)
self.conv2 = SupermaskConv(32, 64, 3, 1, bias=False)
self.dropout1 = nn.Dropout2d(0.25)
self.dropout2 = nn.Dropout2d(0.5)
self.fc1 = SupermaskLinear(9216, 128, bias=False)
self.fc2 = SupermaskLinear(128, 10, bias=False)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
def train(model, device, train_loader, optimizer, criterion, epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
def test(model, device, criterion, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += criterion(output, target)
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
def main():
global args
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=14, metavar='N',
help='number of epochs to train (default: 14)')
parser.add_argument('--lr', type=float, default=0.1, metavar='LR',
help='learning rate (default: 0.1)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='Momentum (default: 0.9)')
parser.add_argument('--wd', type=float, default=0.0005, metavar='M',
help='Weight decay (default: 0.0005)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--save-model', action='store_true', default=False,
help='For Saving the current Model')
parser.add_argument('--data', type=str, default='../data', help='Location to store data')
parser.add_argument('--sparsity', type=float, default=0.5,
help='how sparse is each layer')
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
    print(device)
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(os.path.join(args.data, 'mnist'), train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST(os.path.join(args.data, 'mnist'), train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.test_batch_size, shuffle=True, **kwargs)
model = Net().to(device)
# NOTE: only pass the parameters where p.requires_grad == True to the optimizer! Important!
optimizer = optim.SGD(
[p for p in model.parameters() if p.requires_grad],
lr=args.lr,
momentum=args.momentum,
weight_decay=args.wd,
)
criterion = nn.CrossEntropyLoss().to(device)
scheduler = CosineAnnealingLR(optimizer, T_max=args.epochs)
for epoch in range(1, args.epochs + 1):
train(model, device, train_loader, optimizer, criterion, epoch)
test(model, device, criterion, test_loader)
scheduler.step()
if args.save_model:
torch.save(model.state_dict(), "mnist_cnn.pt")
if __name__ == '__main__':
main()
| [
"torch.device",
"torch.flatten",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.no_grad",
"torch.nn.init.kaiming_normal_",
"torch.nn.functional.log_softmax",
"torch.manual_seed",
"torch.nn.Dropout2d",
"torch.nn.functional.linear",
"torch.cuda.is_available",
"torch.nn.functional.relu",
"torch.nn.functional.max_pool2d",
"torch.nn.functional.conv2d",
"torch.nn.CrossEntropyLoss"
] | 1.3.0 | weizhonz/hid | 3ee3aeeaf12baeadf3d85c1bb86296073bba3fbe |
1.6 | import dataclasses
import itertools
from typing import List, Optional, Tuple
import nltk
import torch
from .downloader import load_trained_model
from ..parse_base import BaseParser, BaseInputExample
from ..ptb_unescape import ptb_unescape, guess_space_after
TOKENIZER_LOOKUP = {
"en": "english",
"de": "german",
"fr": "french",
"pl": "polish",
"sv": "swedish",
}
LANGUAGE_GUESS = {
"ar": ("X", "XP", "WHADVP", "WHNP", "WHPP"),
"zh": ("VSB", "VRD", "VPT", "VNV"),
"en": ("WHNP", "WHADJP", "SINV", "SQ"),
"de": ("AA", "AP", "CCP", "CH", "CNP", "VZ"),
"fr": ("P+", "P+D+", "PRO+", "PROREL+"),
"he": ("PREDP", "SYN_REL", "SYN_yyDOT"),
"pl": ("formaczas", "znakkonca"),
"sv": ("PSEUDO", "AVP", "XP"),
}
def guess_language(label_vocab):
"""Guess parser language based on its syntactic label inventory.
The parser training scripts are designed to accept arbitrary input tree
files with minimal language-specific behavior, but at inference time we may
need to know the language identity in order to invoke other pipeline
elements, such as tokenizers.
"""
for language, required_labels in LANGUAGE_GUESS.items():
if all(label in label_vocab for label in required_labels):
return language
return None
@dataclasses.dataclass
class InputSentence(BaseInputExample):
"""Parser input for a single sentence.
At least one of `words` and `escaped_words` is required for each input
sentence. The remaining fields are optional: the parser will attempt to
derive the value for any missing fields using the fields that are provided.
`words` and `space_after` together form a reversible tokenization of the
input text: they represent, respectively, the Unicode text for each word and
an indicator for whether the word is followed by whitespace. These are used
as inputs by the parser.
`tags` is a list of part-of-speech tags, if available prior to running the
parser. The parser does not actually use these tags as input, but it will
pass them through to its output. If `tags` is None, the parser will perform
its own part of speech tagging (if the parser was not trained to also do
tagging, "UNK" part-of-speech tags will be used in the output instead).
`escaped_words` are the representations of each leaf to use in the output
tree. If `words` is provided, `escaped_words` will not be used by the neural
network portion of the parser, and will only be incorporated when
constructing the output tree. Therefore, `escaped_words` may be used to
accommodate any dataset-specific text encoding, such as transliteration.
Here is an example of the differences between these fields for English PTB:
(raw text): "Fly safely."
words: " Fly safely . "
space_after: False True False False False
tags: `` VB RB . ''
escaped_words: `` Fly safely . ''
"""
words: Optional[List[str]] = None
space_after: Optional[List[bool]] = None
tags: Optional[List[str]] = None
escaped_words: Optional[List[str]] = None
@property
def tree(self):
return None
def leaves(self):
return self.escaped_words
def pos(self):
if self.tags is not None:
return list(zip(self.escaped_words, self.tags))
else:
return [(word, "UNK") for word in self.escaped_words]
class Parser:
"""Berkeley Neural Parser (benepar), integrated with NLTK.
Use this class to apply the Berkeley Neural Parser to pre-tokenized datasets
and treebanks, or when integrating the parser into an NLP pipeline that
already performs tokenization, sentence splitting, and (optionally)
part-of-speech tagging. For parsing starting with raw text, it is strongly
encouraged that you use spaCy and benepar.BeneparComponent instead.
Sample usage:
>>> parser = benepar.Parser("benepar_en3")
>>> input_sentence = benepar.InputSentence(
words=['"', 'Fly', 'safely', '.', '"'],
space_after=[False, True, False, False, False],
tags=['``', 'VB', 'RB', '.', "''"],
escaped_words=['``', 'Fly', 'safely', '.', "''"],
)
>>> parser.parse(input_sentence)
Not all fields of benepar.InputSentence are required, but at least one of
`words` and `escaped_words` must not be None. The parser will attempt to
guess the value for missing fields. For example,
>>> input_sentence = benepar.InputSentence(
words=['"', 'Fly', 'safely', '.', '"'],
)
>>> parser.parse(input_sentence)
Although this class is primarily designed for use with data that has already
been tokenized, to help with interactive use and debugging it also accepts
simple text string inputs. However, using this class to parse from raw text
is STRONGLY DISCOURAGED for any application where parsing accuracy matters.
When parsing from raw text, use spaCy and benepar.BeneparComponent instead.
The reason is that parser models do not ship with a tokenizer or sentence
splitter, and some models may not include a part-of-speech tagger either. A
toolkit must be used to fill in these pipeline components, and spaCy
outperforms NLTK in all of these areas (sometimes by a large margin).
>>> parser.parse('"Fly safely."') # For debugging/interactive use only.
"""
def __init__(self, name, batch_size=64, language_code=None):
"""Load a trained parser model.
Args:
name (str): Model name, or path to pytorch saved model
batch_size (int): Maximum number of sentences to process per batch
language_code (str, optional): language code for the parser (e.g.
'en', 'he', 'zh', etc). Our official trained models will set
this automatically, so this argument is only needed if training
on new languages or treebanks.
"""
self._parser = load_trained_model(name)
if torch.cuda.is_available():
self._parser.cuda()
if language_code is not None:
self._language_code = language_code
else:
self._language_code = guess_language(self._parser.config["label_vocab"])
self._tokenizer_lang = TOKENIZER_LOOKUP.get(self._language_code, None)
self.batch_size = batch_size
def parse(self, sentence):
"""Parse a single sentence
Args:
sentence (InputSentence or List[str] or str): Sentence to parse.
If the input is of List[str], it is assumed to be a sequence of
words and will behave the same as only setting the `words` field
of InputSentence. If the input is of type str, the sentence will
be tokenized using the default NLTK tokenizer (not recommended:
if parsing from raw text, use spaCy and benepar.BeneparComponent
instead).
Returns:
nltk.Tree
"""
return list(self.parse_sents([sentence]))[0]
def parse_sents(self, sents):
"""Parse multiple sentences in batches.
Args:
sents (Iterable[InputSentence]): An iterable of sentences to be
parsed. `sents` may also be a string, in which case it will be
segmented into sentences using the default NLTK sentence
splitter (not recommended: if parsing from raw text, use spaCy
and benepar.BeneparComponent instead). Otherwise, each element
of `sents` will be treated as a sentence. The elements of
`sents` may also be List[str] or str: see Parser.parse() for
documentation regarding these cases.
Yields:
nltk.Tree objects, one per input sentence.
"""
if isinstance(sents, str):
if self._tokenizer_lang is None:
raise ValueError(
"No tokenizer available for this language. "
"Please split into individual sentences and tokens "
"before calling the parser."
)
sents = nltk.sent_tokenize(sents, self._tokenizer_lang)
end_sentinel = object()
for batch_sents in itertools.zip_longest(
*([iter(sents)] * self.batch_size), fillvalue=end_sentinel
):
batch_inputs = []
for sent in batch_sents:
if sent is end_sentinel:
break
elif isinstance(sent, str):
if self._tokenizer_lang is None:
raise ValueError(
"No word tokenizer available for this language. "
"Please tokenize before calling the parser."
)
escaped_words = nltk.word_tokenize(sent, self._tokenizer_lang)
sent = InputSentence(escaped_words=escaped_words)
elif isinstance(sent, (list, tuple)):
sent = InputSentence(words=sent)
elif not isinstance(sent, InputSentence):
raise ValueError(
"Sentences must be one of: InputSentence, list, tuple, or str"
)
batch_inputs.append(self._with_missing_fields_filled(sent))
for inp, output in zip(
batch_inputs, self._parser.parse(batch_inputs, return_compressed=True)
):
# If pos tags are provided as input, ignore any tags predicted
# by the parser.
if inp.tags is not None:
output = output.without_predicted_tags()
yield output.to_tree(
inp.pos(),
self._parser.decoder.label_from_index,
self._parser.tag_from_index,
)
def _with_missing_fields_filled(self, sent):
if not isinstance(sent, InputSentence):
raise ValueError("Input is not an instance of InputSentence")
if sent.words is None and sent.escaped_words is None:
raise ValueError("At least one of words or escaped_words is required")
elif sent.words is None:
sent = dataclasses.replace(sent, words=ptb_unescape(sent.escaped_words))
elif sent.escaped_words is None:
escaped_words = [
word.replace("(", "-LRB-")
.replace(")", "-RRB-")
.replace("{", "-LCB-")
.replace("}", "-RCB-")
.replace("[", "-LSB-")
.replace("]", "-RSB-")
for word in sent.words
]
sent = dataclasses.replace(sent, escaped_words=escaped_words)
else:
if len(sent.words) != len(sent.escaped_words):
raise ValueError(
f"Length of words ({len(sent.words)}) does not match "
f"escaped_words ({len(sent.escaped_words)})"
)
if sent.space_after is None:
if self._language_code == "zh":
space_after = [False for _ in sent.words]
elif self._language_code in ("ar", "he"):
space_after = [True for _ in sent.words]
else:
space_after = guess_space_after(sent.words)
sent = dataclasses.replace(sent, space_after=space_after)
elif len(sent.words) != len(sent.space_after):
raise ValueError(
f"Length of words ({len(sent.words)}) does not match "
f"space_after ({len(sent.space_after)})"
)
assert len(sent.words) == len(sent.escaped_words) == len(sent.space_after)
return sent
| [
"torch.cuda.is_available"
] | 1.6.0 | thomaslu2000/Incremental-Parsing-Representations | 1b0ec638e85f0e521a12b53d8b309191c40fe0d3 |
1.5 | # Copyright Contributors to the Pyro project.
# Copyright (c) 2020, YosefLab.
# SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
"""
The data preprocessing code in this script is adapted from:
https://github.com/YosefLab/scvi-tutorials/blob/50dd3269abfe0c375ec47114f2c20725a016736f/seed_labeling.ipynb
"""
import math
import numpy as np
from scipy import sparse
import torch
import torch.nn as nn
class BatchDataLoader(object):
"""
This custom DataLoader serves mini-batches that are either fully-observed (i.e. labeled)
or partially-observed (i.e. unlabeled) but never mixed.
"""
def __init__(self, data_x, data_y, batch_size, num_classes=4, missing_label=-1):
super().__init__()
self.data_x = data_x
self.data_y = data_y
self.batch_size = batch_size
self.num_classes = num_classes
self.unlabeled = torch.where(data_y == missing_label)[0]
self.num_unlabeled = self.unlabeled.size(0)
self.num_unlabeled_batches = math.ceil(self.num_unlabeled / self.batch_size)
self.labeled = torch.where(data_y != missing_label)[0]
self.num_labeled = self.labeled.size(0)
self.num_labeled_batches = math.ceil(self.num_labeled / self.batch_size)
assert self.data_x.size(0) == self.data_y.size(0)
assert len(self) > 0
@property
def size(self):
return self.data_x.size(0)
def __len__(self):
return self.num_unlabeled_batches + self.num_labeled_batches
def _sample_batch_indices(self):
batch_order = torch.randperm(len(self)).tolist()
unlabeled_idx = self.unlabeled[torch.randperm(self.num_unlabeled)]
labeled_idx = self.labeled[torch.randperm(self.num_labeled)]
slices = []
for i in range(self.num_unlabeled_batches):
_slice = unlabeled_idx[i * self.batch_size: (i + 1) * self.batch_size]
slices.append((_slice, False))
for i in range(self.num_labeled_batches):
_slice = labeled_idx[i * self.batch_size: (i + 1) * self.batch_size]
slices.append((_slice, True))
return slices, batch_order
def __iter__(self):
slices, batch_order = self._sample_batch_indices()
for i in range(len(batch_order)):
_slice = slices[batch_order[i]]
if _slice[1]:
# labeled
yield self.data_x[_slice[0]], \
nn.functional.one_hot(self.data_y[_slice[0]], num_classes=self.num_classes)
else:
# unlabeled
yield self.data_x[_slice[0]], None
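# A minimal usage sketch (illustrative only; the shapes, sizes and labels below
# are assumptions):
#
#   X = torch.randn(200, 17)
#   Y = torch.full((200,), -1, dtype=torch.long)
#   Y[:50], Y[50:100] = 0, 1                      # seed-labeled cells
#   loader = BatchDataLoader(X, Y, batch_size=32, num_classes=2)
#   for x, y in loader:
#       ...  # y is a one-hot label tensor for labeled batches, None for unlabeled ones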
def _get_score(normalized_adata, gene_set):
"""
Returns the score per cell given a dictionary of + and - genes
"""
score = np.zeros(normalized_adata.n_obs)
for gene in gene_set['positive']:
expression = np.array(normalized_adata[:, gene].X)
score += expression.flatten()
for gene in gene_set['negative']:
expression = np.array(normalized_adata[:, gene].X)
score -= expression.flatten()
return score
def _get_cell_mask(normalized_adata, gene_set):
"""
Calculates the score per cell for a list of genes, then returns a mask for
the cells with the highest 50 scores.
"""
score = _get_score(normalized_adata, gene_set)
cell_idx = score.argsort()[-50:]
mask = np.zeros(normalized_adata.n_obs)
mask[cell_idx] = 1
return mask.astype(bool)
def get_data(dataset="pbmc", batch_size=100, cuda=False):
"""
Does the necessary preprocessing and returns a BatchDataLoader for the PBMC dataset.
"""
assert dataset in ['pbmc', 'mock']
# create mock dataset for CI
if dataset == 'mock':
num_genes = 17
num_data = 200
X = torch.distributions.Poisson(rate=10.0).sample(sample_shape=(num_data, num_genes))
Y = torch.zeros(num_data, dtype=torch.long)
Y[50:100] = 1
Y[100:] = -1
if cuda:
X, Y = X.cuda(), Y.cuda()
return BatchDataLoader(X, Y, batch_size), num_genes, 2.0, 1.0, None
import scvi
import scanpy as sc
adata = scvi.data.purified_pbmc_dataset(subset_datasets=["regulatory_t", "naive_t",
"memory_t", "naive_cytotoxic"])
gene_subset = ["CD4", "FOXP3", "TNFRSF18", "IL2RA", "CTLA4", "CD44", "TCF7",
"CD8B", "CCR7", "CD69", "PTPRC", "S100A4"]
normalized = adata.copy()
sc.pp.normalize_total(normalized, target_sum=1e4)
sc.pp.log1p(normalized)
normalized = normalized[:, gene_subset].copy()
sc.pp.scale(normalized)
# hand curated list of genes for identifying ground truth
cd4_reg_geneset = {"positive": ["TNFRSF18", "CTLA4", "FOXP3", "IL2RA"], "negative": ["S100A4", "PTPRC", "CD8B"]}
cd8_naive_geneset = {"positive": ["CD8B", "CCR7"], "negative": ["CD4"]}
cd4_naive_geneset = {"positive": ["CCR7", "CD4"], "negative": ["S100A4", "PTPRC", "FOXP3", "IL2RA", "CD69"]}
cd4_mem_geneset = {"positive": ["S100A4"], "negative": ["IL2RA", "FOXP3", "TNFRSF18", "CCR7"]}
cd4_reg_mask = _get_cell_mask(normalized, cd4_reg_geneset)
cd8_naive_mask = _get_cell_mask(normalized, cd8_naive_geneset)
cd4_naive_mask = _get_cell_mask(normalized, cd4_naive_geneset)
cd4_mem_mask = _get_cell_mask(normalized, cd4_mem_geneset)
# these will be our seed labels
seed_labels = -np.ones(cd4_mem_mask.shape[0])
seed_labels[cd8_naive_mask] = 0 # "CD8 Naive T cell"
seed_labels[cd4_naive_mask] = 1 # "CD4 Naive T cell"
seed_labels[cd4_mem_mask] = 2 # "CD4 Memory T cell"
seed_labels[cd4_reg_mask] = 3 # "CD4 Regulatory T cell"
# this metadata will be used for plotting
seed_colors = ['lightgray'] * seed_labels.shape[0]
seed_sizes = [0.05] * seed_labels.shape[0]
for i in range(len(seed_colors)):
if seed_labels[i] == 0:
seed_colors[i] = 'lightcoral'
elif seed_labels[i] == 1:
seed_colors[i] = 'limegreen'
elif seed_labels[i] == 2:
seed_colors[i] = 'deepskyblue'
elif seed_labels[i] == 3:
seed_colors[i] = 'mediumorchid'
if seed_labels[i] != -1:
seed_sizes[i] = 25
adata.obs['seed_labels'] = seed_labels
adata.obs['seed_colors'] = seed_colors
adata.obs['seed_marker_sizes'] = seed_sizes
# filter out non-variable genes
adata_filter = adata.copy()
sc.pp.normalize_per_cell(adata_filter, counts_per_cell_after=1e4)
sc.pp.log1p(adata_filter)
sc.pp.highly_variable_genes(adata_filter, min_mean=0.0125, max_mean=3.0, min_disp=0.5)
highly_variable_genes = adata_filter.var["highly_variable"]
Y = torch.from_numpy(seed_labels).long()
X = torch.from_numpy(sparse.csr_matrix.todense(adata.X)).float()
    # the prior mean and scale for the log count latent variable `l`
    # is set using the empirical mean and standard deviation of the observed log counts
log_counts = X.sum(-1).log()
l_mean, l_scale = log_counts.mean().item(), log_counts.std().item()
if cuda:
X, Y = X.cuda(), Y.cuda()
# subsample and remove ~50% of the unlabeled cells
labeled = torch.where(Y != -1)[0]
unlabeled = torch.where(Y == -1)[0]
unlabeled = unlabeled[torch.randperm(unlabeled.size(0))[:19800]]
idx = torch.cat([labeled, unlabeled])
num_genes = X.size(-1)
adata = adata[idx.data.cpu().numpy(), highly_variable_genes]
adata.raw = adata
return BatchDataLoader(X[idx], Y[idx], batch_size), num_genes, l_mean, l_scale, adata
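# A minimal usage sketch (illustrative only):
#
#   data_loader, num_genes, l_mean, l_scale, adata = get_data(
#       dataset="mock", batch_size=100, cuda=False
#   )
#   # dataset="mock" skips the scvi/scanpy preprocessing entirely; adata is None there.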
| [
"torch.zeros",
"torch.cat",
"torch.nn.functional.one_hot",
"torch.randperm",
"torch.from_numpy",
"torch.distributions.Poisson",
"torch.where"
] | 1.5.0 | akihironitta/pyro | 0ab6e474330942ff4ec2a87a6cc0c671943fc5cd |
1.9 | import os
import glob
import random
import cv2
import numpy as np
import torch
import matplotlib.pyplot as plt
import open3d
from skimage import io, img_as_float32
from scipy import ndimage
from torch_geometric.data import Data, DataListLoader
from torch_geometric.loader import DataLoader as GraphLevelDataLoader
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
import transform
from typing import List
from easydict import EasyDict
from utils import math_utils, data_utils, unit_tests
class ImageGraphTextureDataSet(Dataset):
def __init__(self, image_files, end_level, is_train, benchmark, img_size, crop_half_width, circle_radius, num_circles=4, max_items=None,
no_train_cropped=False, transform=None, random_mask=False):
self._is_train = is_train
self._benchmark = benchmark
self.img_size = img_size
self.crop_half_width = crop_half_width
self._end_level = end_level
self._transform = transform
self._no_train_cropped = no_train_cropped
self.image_files = np.array(image_files)
self.random_mask = random_mask
self.circle_radius = circle_radius
self.num_circles = num_circles
self.circle = torch.zeros((self.circle_radius * 2, self.circle_radius * 2, 1), dtype=torch.bool)
for row in range(self.circle.shape[0]):
for col in range(self.circle.shape[1]):
if abs(row - self.circle_radius) ** 2 + abs(col - self.circle_radius) ** 2 <= self.circle_radius ** 2:
self.circle[row, col] = True
self.traces_list = []
self.edge_indices_list = []
self.num_vertices_list = []
self.decimation = 2
# Build fake traces
for level in range(self._end_level):
level_img_size = self.img_size // (self.decimation ** level)
num_verties = level_img_size ** 2
self.num_vertices_list.append(num_verties)
if level > 0:
trace = np.arange(num_verties).reshape(level_img_size, level_img_size)
trace = np.repeat(trace, self.decimation, axis=1).repeat(self.decimation, axis=0)
trace = np.reshape(trace, (-1,))
#trace = torch.from_numpy(trace)
#trace = torch.cat((trace, trace + level_img_size * level_img_size), dim=0)
print(level, 'Trace shape:', trace.shape)
self.traces_list.append(trace)
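        # For example (illustrative): with decimation 2, a coarse 2x2 level
        # yields the trace for the finer 4x4 level
        #   [0, 0, 1, 1,  0, 0, 1, 1,  2, 2, 3, 3,  2, 2, 3, 3]
        # i.e. every fine-level vertex is mapped to the index of its coarse
        # parent vertex.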
# Build fake decimated edges
for level in range(self._end_level):
level_img_size = self.img_size // (self.decimation ** level)
edge_indices = self._generate_image_graph_edges(level_img_size)
#edge_indices = torch.from_numpy(edge_indices)
#edge_indices = torch.cat((edge_indices, edge_indices + level_img_size * level_img_size), dim=0)
#edge_indices = edge_indices.t().contiguous()
print(level, 'Number of edge indices:', edge_indices.shape)
self.edge_indices_list.append(edge_indices)
def _generate_image_graph_edges(self, img_size):
def double_set_add(s, a, b):
s.add((a, b))
s.add((b, a))
def get_neighbor_coords_list(r, c, max_size):
coords_list = []
# TODO: Should we add self-loops?
# Maybe not since most graph algorithms explicitly include the vertex they're operating on
#coords_list.append((r, c))
if r > 0:
coords_list.append((r - 1, c + 0))
#if c > 0:
# coords_list.append((r - 1, c - 1))
#if c < max_size - 1:
# coords_list.append((r - 1, c + 1))
if c > 0:
coords_list.append((r + 0, c - 1))
if c < max_size - 1:
coords_list.append((r + 0, c + 1))
if r < max_size - 1:
coords_list.append((r + 1, c + 0))
#if c > 0:
# coords_list.append((r + 1, c - 1))
#if c < max_size - 1:
# coords_list.append((r + 1, c + 1))
return coords_list
edge_indices = set()
for r in range(img_size):
for c in range(img_size):
index = r * img_size + c
neighbor_coords = get_neighbor_coords_list(r, c, img_size)
for neighbor_coord in neighbor_coords:
neighbor_index = neighbor_coord[0] * img_size + neighbor_coord[1]
double_set_add(edge_indices, index, neighbor_index)
edge_indices = np.asarray(list(edge_indices))
return edge_indices
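    # For example (illustrative): img_size=2 gives a 2x2 pixel grid whose
    # 4-neighbour edge set is {(0, 1), (0, 2), (1, 3), (2, 3)} stored in both
    # directions, so edge_indices has shape (8, 2); diagonals and self-loops
    # are excluded (see the commented-out branches above).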
def __len__(self):
return len(self.image_files)
def __getitem__(self, index: int):
img_path = self.image_files[index]
img = io.imread(img_path)
img = np.array(img)
sample = {'color': img}
if self._transform:
sample = self._transform(sample)
img = sample['color']
# Create circular masks
mask = torch.zeros((self.img_size, self.img_size, 1), dtype=torch.bool)
for i in range(self.num_circles):
if self._is_train and self.random_mask:
x_offset = int((self.img_size / 2 - self.crop_half_width) * (random.random() * 2.0 - 1.0) * 0.95)
y_offset = int((self.img_size / 2 - self.crop_half_width) * (random.random() * 2.0 - 1.0) * 0.95)
else:
x_offset = ((i % 2) * 2 - 1) * self.img_size // 4
y_offset = ((i // 2) * 2 - 1) * self.img_size // 4
row_start = self.img_size//2-self.circle_radius + x_offset
row_end = self.img_size//2+self.circle_radius + x_offset
col_start = self.img_size//2-self.circle_radius + y_offset
col_end = self.img_size//2+self.circle_radius + y_offset
mask[row_start:row_end, col_start:col_end] += self.circle
img = torch.reshape(img, (-1, 3))
mask = torch.reshape(mask, (-1, 1))
sample = data_utils.HierarchicalData(x=torch.cat([img * ~mask, mask], dim=-1),
color=img,
mask=mask,
edge_index=torch.from_numpy(self.edge_indices_list[0]).t().contiguous(),
#num_vertices=self.num_vertices_list,
)
##sample.num_vertices = torch.tensor(self.num_vertices_list)
num_vertices = [sample.x.shape[0]]
sample.num_vertices = torch.tensor(self.num_vertices_list, dtype=torch.int)
for level in range(1, self._end_level):
setattr(sample, f"hierarchy_edge_index_{level}", torch.from_numpy(self.edge_indices_list[level]).t().contiguous())
setattr(sample, f"hierarchy_trace_index_{level}", torch.from_numpy(self.traces_list[level - 1]))
num_vertices.append(int(sample[f"hierarchy_trace_index_{level}"].max() + 1))
sample.num_vertices = torch.tensor(num_vertices, dtype=torch.int)
return sample
class Normalize(object):
"""Normalize color images between [-1,1]."""
def __call__(self, sample):
color_image = sample['color']
# NOTE: Don't normalize input_image. It's just a matrix of coordinates
color_image = img_as_float32(color_image)
color_image = (color_image * 2.0) - 1
#color_image = color_image - 0.5
return {'color': color_image}
class Rescale(object):
"""Rescale the image in a sample to a given size.
Args:
output_size (tuple or int): Desired output size. If tuple, output is
matched to output_size. If int, smaller of image edges is matched
to output_size keeping aspect ratio the same.
"""
def __init__(self, min_size, max_size):
# For now size is defined as the smaller size of an image
assert isinstance(min_size, int)
assert isinstance(max_size, int)
assert min_size <= max_size
self.min_size = min_size
self.max_size = max_size
def __call__(self, sample):
input_image = sample['color']
h, w = input_image.shape[:2]
output_size = np.random.randint(self.min_size, self.max_size + 1)
if isinstance(output_size, int):
if h > w:
new_h, new_w = output_size * h / w, output_size
else:
new_h, new_w = output_size, output_size * w / h
else:
new_h, new_w = output_size
new_h, new_w = int(new_h), int(new_w)
# TODO: Use pillow library for resizing images
# Nearest neighbor for input_image since we can't interpolate across discontinuities in uv coordinates
#input_image = transform.resize(input_image, (new_h, new_w))
#input_image = cv2.resize(input_image, (new_w, new_h), interpolation=cv2.INTER_NEAREST)
input_image = cv2.resize(input_image, (new_w, new_h), interpolation=cv2.INTER_AREA)
return {'color': input_image}
class CenterCrop(object):
def __init__(self, crop_size):
assert isinstance(crop_size, tuple)
self.crop_size = crop_size
def __call__(self, sample):
input_image = sample['color']
# Assuming input_image and color_image are the same shape
h, w, _ = input_image.shape
size_crop_h, size_crop_w = self.crop_size
# Get a valid starting and end positions
h_start = int((h - size_crop_h) / 2)
w_start = int((w - size_crop_w) / 2)
h_end = h_start + size_crop_h
w_end = w_start + size_crop_w
# Crop the input and target
input_image = input_image[h_start:h_end, w_start:w_end, :]
return {'color': input_image}
class RandomRotation(object):
def __init__(self):
self.angles = [0, 90, 180, 270]
def __call__(self, sample):
input_image = sample['color']
angle = random.choice(self.angles)
input_image = ndimage.rotate(input_image, angle, reshape=False, mode='constant')
return {'color': input_image}
class RandomFlip(object):
def __init__(self, flip_axis):
self.flip_axis = flip_axis
def __call__(self, sample):
input_image = sample['color']
if np.random.choice(a=[False, True]):
input_image = np.flip(input_image, axis=self.flip_axis).copy()
return {'color': input_image}
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample):
input_image = sample['color']
# NOTE: Axis swapping is not necessary for uv coords since
# it is not an image, but rather a matrix of coordinates
# swap color axis because
# numpy image: H x W x C
# torch image: C X H X W
#input_image = input_image.transpose((2, 0, 1))
return {'color': torch.from_numpy(input_image)}
class ImageGraphTextureDataLoader:
def __init__(self, config, multi_gpu):
self.config = EasyDict(config)
self.train_files = self._load(os.path.join(self.config.root_dir, 'train'))
self.val_files = self._load(os.path.join(self.config.root_dir, 'val'))
len_train_files, len_val_files = len(self.train_files), len(self.val_files)
total_num_files = len_train_files + len_val_files
frac_train_files = len_train_files / total_num_files
if 0 <= self.config.max_items <= total_num_files:
max_train_files = int(self.config.max_items * frac_train_files)
max_val_files = int(self.config.max_items * (1 - frac_train_files))
else:
max_train_files = int(total_num_files * frac_train_files)
max_val_files = int(total_num_files * (1 - frac_train_files))
self.train_files = self.train_files[:max_train_files]
self.val_files = self.val_files[:max_val_files]
transf_list_train = [
Normalize(),
Rescale(self.config.img_size, self.config.img_size),
CenterCrop((self.config.img_size, self.config.img_size)),
]
if self.config.random_augmentation:
transf_list_train += [
RandomRotation(),
RandomFlip(flip_axis=1),
]
transf_list_train.append(ToTensor())
# Build val/test transformation
transf_list_valid = [
Normalize(),
Rescale(self.config.img_size, self.config.img_size),
CenterCrop((self.config.img_size, self.config.img_size)),
#RandomFlip(flip_axis=1),
ToTensor()
]
transf_train = transforms.Compose(transf_list_train)
transf_valid = transforms.Compose(transf_list_valid)
if multi_gpu:
dataloader_class = DataListLoader
else:
dataloader_class = GraphLevelDataLoader
self.train_dataset = ImageGraphTextureDataSet(self.train_files, self.config.end_level, is_train=True,
circle_radius=self.config.circle_radius,
transform=transf_train, random_mask=self.config.random_mask,
no_train_cropped=self.config.no_train_cropped, benchmark=False,
img_size=self.config.img_size, max_items=self.config.max_items,
crop_half_width=self.config.crop_half_width)
print('train dataset len', len(self.train_dataset))
self.train_loader = dataloader_class(self.train_dataset, batch_size=self.config.train_batch_size,
shuffle=True, pin_memory=True, persistent_workers=self.config.num_workers > 0,
num_workers=self.config.num_workers)
self.sample_train_loader = dataloader_class(self.train_dataset, batch_size=self.config.train_batch_size,
shuffle=False, pin_memory=True,
num_workers=self.config.num_workers)
self.sample_train_dataset = torch.utils.data.Subset(self.train_dataset,
np.arange(min(self.config.num_static_samples,
len(self.train_dataset))))
self.sample_train_loader = dataloader_class(self.sample_train_dataset, batch_size=self.config.train_batch_size,
shuffle=False, pin_memory=True,
num_workers=self.config.num_workers)
# TODO: Update val dataset so that it doesn't have to be treated like a train dataset
# includes is_train=False and no_train_cropped=self.config.no_train_cropped
self.val_dataset = ImageGraphTextureDataSet(self.val_files, self.config.end_level, is_train=False,
circle_radius=self.config.circle_radius,
transform=transf_valid, benchmark=False,
no_train_cropped=self.config.no_train_cropped,
img_size=self.config.img_size, max_items=self.config.max_items,
crop_half_width=self.config.crop_half_width)
print('val dataset len', len(self.val_dataset))
#unit_tests.compare_train_val(self.train_colors, self.val_colors)
self.val_loader = dataloader_class(self.val_dataset, batch_size=self.config.test_batch_size, shuffle=False,
pin_memory=True, persistent_workers=self.config.num_workers > 0,
num_workers=self.config.num_workers)
self.sample_val_dataset = torch.utils.data.Subset(self.val_dataset,
np.arange(min(self.config.num_static_samples,
len(self.val_dataset))))
self.sample_val_loader = dataloader_class(self.sample_val_dataset, batch_size=self.config.test_batch_size,
shuffle=False, pin_memory=True,
num_workers=self.config.num_workers)
def _load(self, root_dir, seed=42) -> List[str]:
filenames = glob.glob(f"{root_dir}/*.png")
filenames = sorted(filenames)
random.Random(seed).shuffle(filenames)
return filenames | [
"torch.zeros",
"torch.cat",
"torch.from_numpy",
"torch.tensor",
"torch.reshape"
] | 1.9.1 | johnpeterflynn/surface-texture-inpainting-net | b2de05eaa47c9bcca53b9aee12b6012ac2c05156 |
1.3 | import os
import random
import sys
from unittest.mock import patch
import numpy as np
import pytest
import torch
import torch.nn as nn
from torch.optim import SGD
from torch.utils.data import BatchSampler, DataLoader, RandomSampler
import ignite.distributed as idist
from ignite.engine import Events
from ignite.engine.deterministic import (
DeterministicEngine,
ReproducibleBatchSampler,
keep_random_state,
update_dataloader,
)
from ignite.utils import manual_seed
from tests.ignite.engine import BatchChecker, setup_sampler
def test_dengine_setup_seed_div_by_zero():
with pytest.raises(ValueError, match=r"iter_counter should be positive value"):
DeterministicEngine(lambda e, b: None)._setup_seed(iter_counter=0)
def test_update_dataloader():
def _test(sampler_type=None):
num_epochs = 3
total_batch_size = 4
num_iters = 17
data = torch.randint(0, 1000, size=(num_iters * total_batch_size,))
num_workers = 2
sampler, batch_size = setup_sampler(sampler_type, num_iters, total_batch_size)
dataloader = DataLoader(
data,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=False,
sampler=sampler,
drop_last=True,
shuffle=sampler is None,
)
torch.manual_seed(12)
seen_batches = []
for i in range(num_epochs):
t = []
if sampler_type == "distributed":
sampler.set_epoch(i)
for b in dataloader:
t.append(b)
seen_batches.append(t)
sampler, batch_size = setup_sampler(sampler_type, num_iters, total_batch_size)
dataloader = DataLoader(
data,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=False,
sampler=sampler,
drop_last=True,
shuffle=sampler is None,
)
batch_sampler = dataloader.batch_sampler
new_dataloader = update_dataloader(dataloader, ReproducibleBatchSampler(batch_sampler))
torch.manual_seed(12)
new_batches = []
for i in range(num_epochs):
t = []
if sampler_type == "distributed":
sampler.set_epoch(i)
for b in new_dataloader:
t.append(b)
new_batches.append(t)
for i in range(num_epochs):
assert all([(b1 == b2).all() for b1, b2 in zip(seen_batches[i], new_batches[i])])
_test()
_test("weighted")
_test("distributed")
def test_reproducible_batch_sampler_wrong_input():
with pytest.raises(TypeError, match=r"Argument batch_sampler should be torch.utils.data.sampler.BatchSampler"):
ReproducibleBatchSampler("abc")
def test_reproducible_batch_sampler():
data = list(range(100))
dataloader = DataLoader(data, batch_size=12, num_workers=0, shuffle=True, drop_last=True)
torch.manual_seed(12 + 0)
dataloader_ = update_dataloader(dataloader, ReproducibleBatchSampler(dataloader.batch_sampler))
seen_batches = []
num_epochs = 3
for i in range(num_epochs):
t = []
for b in dataloader_:
t.append(b)
seen_batches.append(t)
torch.manual_seed(12 + i + 1)
for i in range(num_epochs - 1):
for j in range(i + 1, num_epochs):
assert not all([(b1 == b2).all() for b1, b2 in zip(seen_batches[i], seen_batches[j])])
for resume_epoch in range(num_epochs):
torch.manual_seed(12 + resume_epoch)
dataloader_ = update_dataloader(dataloader, ReproducibleBatchSampler(dataloader.batch_sampler))
resumed_seen_batches = []
for b in dataloader_:
resumed_seen_batches.append(b)
assert all([(b1 == b2).all() for b1, b2 in zip(seen_batches[resume_epoch], resumed_seen_batches)])
def _test_keep_random_state(with_numpy):
manual_seed(54)
true_values = []
for _ in range(5):
t = [
torch.tensor([random.random()]),
torch.rand(2),
]
if with_numpy:
t.append(torch.from_numpy(np.random.rand(2)))
true_values.append(t)
@keep_random_state
def user_handler():
manual_seed(22)
_ = [
random.random(),
torch.rand(2),
]
if with_numpy:
_ = np.random.rand(2)
manual_seed(54)
res_values = []
for _ in range(5):
r = [
torch.tensor([random.random()]),
torch.rand(2),
]
if with_numpy:
r.append(torch.from_numpy(np.random.rand(2)))
res_values.append(r)
user_handler()
for a, b in zip(true_values, res_values):
for i, j in zip(a, b):
assert (i == j).all()
def test_keep_random_state():
_test_keep_random_state(with_numpy=True)
def test_keep_random_state_without_numpy():
with patch.dict("sys.modules", {"numpy": None}):
_test_keep_random_state(with_numpy=False)
def test_strict_resume_from_iter():
def _test(epoch_length=None):
max_epochs = 5
num_iters = 21
torch.manual_seed(0)
data = torch.randint(0, 1000, size=(num_iters,))
if epoch_length is None:
epoch_length = num_iters
for resume_iteration in range(2, min(num_iters * max_epochs, epoch_length * max_epochs), 4):
batch_checker = BatchChecker(data, init_counter=resume_iteration)
def update_fn(_, batch):
assert batch_checker.check(
batch
), f"{resume_iteration} | {batch_checker.counter}: {batch_checker.true_batch} vs {batch}"
engine = DeterministicEngine(update_fn)
@engine.on(Events.EPOCH_COMPLETED)
def check_iteration(_):
assert engine.state.iteration == batch_checker.counter
resume_state_dict = dict(
iteration=resume_iteration, max_epochs=max_epochs, epoch_length=epoch_length, rng_states=None
)
engine.load_state_dict(resume_state_dict)
engine.run(data)
assert engine.state.epoch == max_epochs
assert engine.state.iteration == epoch_length * max_epochs
_test()
_test(60)
_test(15)
def test_strict_resume_from_epoch():
def _test(epoch_length=None):
max_epochs = 10
num_iters = 21
torch.manual_seed(0)
data = torch.randint(0, 1000, size=(num_iters,))
if epoch_length is None:
epoch_length = num_iters
for resume_epoch in range(1, max_epochs):
batch_checker = BatchChecker(data, init_counter=resume_epoch * epoch_length)
def update_fn(_, batch):
assert batch_checker.check(
batch
), f"{resume_epoch} | {batch_checker.counter}: {batch_checker.true_batch} vs {batch}"
engine = DeterministicEngine(update_fn)
resume_state_dict = dict(
epoch=resume_epoch, max_epochs=max_epochs, epoch_length=epoch_length, rng_states=None
)
engine.load_state_dict(resume_state_dict)
engine.run(data)
assert engine.state.epoch == max_epochs
assert engine.state.iteration == epoch_length * max_epochs
_test()
_test(60)
_test(15)
def _test_resume_random_dataloader_from_epoch(device, _setup_sampler, sampler_type=None):
def _test(epoch_length=None):
max_epochs = 5
total_batch_size = 4
num_iters = 21
torch.manual_seed(0)
data = torch.randint(0, 1000, size=(num_iters * total_batch_size,))
if epoch_length is None:
epoch_length = num_iters
for resume_epoch in range(1, max_epochs, 2):
for num_workers in [0, 2]:
sampler, batch_size = _setup_sampler(sampler_type, num_iters, total_batch_size)
orig_dataloader = DataLoader(
data,
batch_size=batch_size,
num_workers=num_workers,
pin_memory="cuda" in torch.device(device).type,
sampler=sampler,
drop_last=True,
shuffle=sampler is None,
)
seen_batchs = []
def update_fn(_, batch):
batch_to_device = batch.to(device)
seen_batchs.append(batch)
engine = DeterministicEngine(update_fn)
if sampler_type == "distributed":
@engine.on(Events.EPOCH_STARTED)
def _(engine):
sampler.set_epoch(engine.state.epoch - 1)
torch.manual_seed(87)
engine.run(
orig_dataloader, max_epochs=max_epochs, epoch_length=epoch_length,
)
batch_checker = BatchChecker(seen_batchs, init_counter=resume_epoch * epoch_length)
sampler, batch_size = _setup_sampler(sampler_type, num_iters, total_batch_size)
resume_dataloader = DataLoader(
data,
batch_size=batch_size,
num_workers=num_workers,
pin_memory="cuda" in torch.device(device).type,
sampler=sampler,
drop_last=True,
shuffle=sampler is None,
)
def update_fn(_, batch):
batch_to_device = batch.to(device)
assert batch_checker.check(
batch
), f"{num_workers} {resume_epoch} | {batch_checker.counter}: {batch_checker.true_batch} vs {batch}"
engine = DeterministicEngine(update_fn)
if sampler_type == "distributed":
@engine.on(Events.EPOCH_STARTED)
def _(engine):
sampler.set_epoch(engine.state.epoch - 1)
resume_state_dict = dict(
epoch=resume_epoch, max_epochs=max_epochs, epoch_length=epoch_length, rng_states=None
)
engine.load_state_dict(resume_state_dict)
torch.manual_seed(87)
engine.run(resume_dataloader)
assert engine.state.epoch == max_epochs
assert engine.state.iteration == epoch_length * max_epochs
_test()
if sampler_type != "distributed":
_test(60)
_test(15)
@pytest.mark.skipif("win" in sys.platform, reason="Skip extremely slow test on Windows/MacOSX")
def test_resume_random_dataloader_from_epoch():
_test_resume_random_dataloader_from_epoch("cpu", setup_sampler)
_test_resume_random_dataloader_from_epoch("cpu", setup_sampler, sampler_type="weighted")
_test_resume_random_dataloader_from_epoch("cpu", setup_sampler, sampler_type="distributed")
class AugmentedData:
def __init__(self, data, enabled=True):
self.data = data
self.enabled = enabled
def __getitem__(self, i):
dp = self.data[i]
r = torch.randint_like(dp, -100, 100) if self.enabled else 0.0
return dp + r * 0.01
def __len__(self):
return len(self.data)
def _test_resume_random_dataloader_from_iter(device, _setup_sampler, sampler_type=None):
def _test(epoch_length=None):
max_epochs = 3
total_batch_size = 4
num_iters = 17
torch.manual_seed(0)
data = torch.randint(0, 1000, size=(num_iters * total_batch_size,))
if epoch_length is None:
epoch_length = num_iters
for resume_iteration in range(2, min(num_iters * max_epochs, epoch_length * max_epochs), 13):
for num_workers in [0, 2]:
sampler, batch_size = _setup_sampler(sampler_type, num_iters, total_batch_size)
orig_dataloader = DataLoader(
data,
batch_size=batch_size,
num_workers=num_workers,
pin_memory="cuda" in torch.device(device).type,
sampler=sampler,
drop_last=True,
shuffle=sampler is None,
)
seen_batchs = []
def update_fn(_, batch):
batch_to_device = batch.to(device)
seen_batchs.append(batch)
engine = DeterministicEngine(update_fn)
if sampler_type == "distributed":
@engine.on(Events.EPOCH_STARTED)
def _(engine):
sampler.set_epoch(engine.state.epoch)
torch.manual_seed(12)
engine.run(
orig_dataloader, max_epochs=max_epochs, epoch_length=epoch_length,
)
batch_checker = BatchChecker(seen_batchs, init_counter=resume_iteration)
sampler, batch_size = _setup_sampler(sampler_type, num_iters, total_batch_size)
resume_dataloader = DataLoader(
data,
batch_size=batch_size,
num_workers=num_workers,
pin_memory="cuda" in torch.device(device).type,
sampler=sampler,
drop_last=True,
shuffle=sampler is None,
)
def update_fn(_, batch):
batch_to_device = batch.to(device)
cfg_msg = f"{num_workers} {resume_iteration}"
assert batch_checker.check(
batch
), f"{cfg_msg} | {batch_checker.counter}: {batch_checker.true_batch} vs {batch}"
engine = DeterministicEngine(update_fn)
if sampler_type == "distributed":
@engine.on(Events.EPOCH_STARTED)
def _(engine):
sampler.set_epoch(engine.state.epoch)
resume_state_dict = dict(
iteration=resume_iteration, max_epochs=max_epochs, epoch_length=epoch_length, rng_states=None
)
engine.load_state_dict(resume_state_dict)
torch.manual_seed(12)
engine.run(resume_dataloader)
assert engine.state.epoch == max_epochs
assert (
engine.state.iteration == epoch_length * max_epochs
), f"{num_workers}, {resume_iteration} | {engine.state.iteration} vs {epoch_length * max_epochs}"
_test()
if sampler_type != "distributed":
_test(40)
_test(11)
@pytest.mark.skipif("win" in sys.platform, reason="Skip extremely slow test on Windows/MacOSX")
def test_resume_random_dataloader_from_iter():
_test_resume_random_dataloader_from_iter("cpu", setup_sampler)
_test_resume_random_dataloader_from_iter("cpu", setup_sampler, sampler_type="weighted")
_test_resume_random_dataloader_from_iter("cpu", setup_sampler, sampler_type="distributed")
def _test_resume_random_data_iterator_from_epoch(device):
def _test(epoch_length=None):
max_epochs = 5
batch_size = 4
num_iters = 21
def infinite_data_iterator():
while True:
for _ in range(num_iters):
data = torch.randint(0, 1000, size=(batch_size,), device=device)
yield data
if epoch_length is None:
epoch_length = num_iters
for resume_epoch in range(1, max_epochs):
seen_batchs = []
def update_fn(_, batch):
                # if there is a random op when using the data batch etc., we cannot resume correctly
# torch.rand(1)
seen_batchs.append(batch)
engine = DeterministicEngine(update_fn)
torch.manual_seed(121)
engine.run(
infinite_data_iterator(), max_epochs=max_epochs, epoch_length=epoch_length,
)
batch_checker = BatchChecker(seen_batchs, init_counter=resume_epoch * epoch_length)
def update_fn(_, batch):
assert batch_checker.check(
batch
), f"{resume_epoch} | {batch_checker.counter}: {batch_checker.true_batch} vs {batch}"
engine = DeterministicEngine(update_fn)
resume_state_dict = dict(
epoch=resume_epoch, max_epochs=max_epochs, epoch_length=epoch_length, rng_states=None
)
engine.load_state_dict(resume_state_dict)
torch.manual_seed(121)
engine.run(infinite_data_iterator())
assert engine.state.epoch == max_epochs
assert engine.state.iteration == epoch_length * max_epochs
_test()
_test(60)
_test(15)
def test_resume_random_data_iterator_from_epoch():
_test_resume_random_data_iterator_from_epoch("cpu")
def _test_resume_random_data_iterator_from_iter(device):
def _test(epoch_length=None):
max_epochs = 3
batch_size = 4
num_iters = 17
def infinite_data_iterator():
while True:
for _ in range(num_iters):
data = torch.randint(0, 1000, size=(batch_size,), device=device)
yield data
if epoch_length is None:
epoch_length = num_iters
for resume_iteration in range(1, min(num_iters * max_epochs, epoch_length * max_epochs), 7):
seen_batchs = []
def update_fn(_, batch):
seen_batchs.append(batch)
engine = DeterministicEngine(update_fn)
torch.manual_seed(24)
engine.run(
infinite_data_iterator(), max_epochs=max_epochs, epoch_length=epoch_length,
)
batch_checker = BatchChecker(seen_batchs, init_counter=resume_iteration)
def update_fn(_, batch):
assert batch_checker.check(
batch
), f"{resume_iteration} | {batch_checker.counter}: {batch_checker.true_batch} vs {batch}"
engine = DeterministicEngine(update_fn)
resume_state_dict = dict(
iteration=resume_iteration, max_epochs=max_epochs, epoch_length=epoch_length, rng_states=None
)
engine.load_state_dict(resume_state_dict)
torch.manual_seed(24)
engine.run(infinite_data_iterator())
assert engine.state.epoch == max_epochs
assert (
engine.state.iteration == epoch_length * max_epochs
), f"{resume_iteration} | {engine.state.iteration} vs {epoch_length * max_epochs}"
_test()
_test(50)
_test(11)
def test_resume_random_data_iterator_from_iter():
_test_resume_random_data_iterator_from_iter("cpu")
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_resume_random_dataloader_from_iter(device, setup_sampler, sampler_type="distributed")
_test_resume_random_dataloader_from_epoch(device, setup_sampler, sampler_type="distributed")
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_resume_random_dataloader_from_iter(device, setup_sampler, sampler_type="distributed")
_test_resume_random_dataloader_from_epoch(device, setup_sampler, sampler_type="distributed")
@pytest.mark.xfail
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_resume_random_dataloader_from_iter(device, setup_sampler, sampler_type="distributed")
_test_resume_random_dataloader_from_epoch(device, setup_sampler, sampler_type="distributed")
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_resume_random_dataloader_from_iter(device, setup_sampler, sampler_type="distributed")
_test_resume_random_dataloader_from_epoch(device, setup_sampler, sampler_type="distributed")
def test_concepts_snippet_resume():
# Commented imports required in the snippet
# import torch
# from torch.utils.data import DataLoader
# from ignite.engine import DeterministicEngine
# from ignite.utils import manual_seed
seen_batches = []
manual_seed(seed=15)
def random_train_data_loader(size):
data = torch.arange(0, size)
return DataLoader(data, batch_size=4, shuffle=True)
def print_train_data(engine, batch):
i = engine.state.iteration
e = engine.state.epoch
print("train", e, i, batch.tolist())
seen_batches.append(batch)
trainer = DeterministicEngine(print_train_data)
print("Original Run")
manual_seed(56)
trainer.run(random_train_data_loader(40), max_epochs=2, epoch_length=5)
original_batches = list(seen_batches)
seen_batches = []
print("Resumed Run")
trainer.load_state_dict({"epoch": 1, "epoch_length": 5, "max_epochs": 2, "rng_states": None})
manual_seed(56)
trainer.run(random_train_data_loader(40))
resumed_batches = list(seen_batches)
seen_batches = []
for b1, b2 in zip(original_batches[5:], resumed_batches):
assert (b1 == b2).all()
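# Added note (illustrative, not part of the original test): resuming from the
# state {"epoch": 1, "epoch_length": 5, ...} makes DeterministicEngine replay
# the deterministic data pipeline up to the end of epoch 1, so the resumed run
# reproduces epoch 2 exactly -- hence only original_batches[5:] are compared.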
def test_concepts_snippet_warning():
def random_train_data_generator():
while True:
yield torch.randint(0, 100, size=(1,))
def print_train_data(engine, batch):
i = engine.state.iteration
e = engine.state.epoch
print("train", e, i, batch.tolist())
trainer = DeterministicEngine(print_train_data)
@trainer.on(Events.ITERATION_COMPLETED(every=3))
def user_handler(_):
# handler synchronizes the random state
torch.manual_seed(12)
a = torch.rand(1)
trainer.run(random_train_data_generator(), max_epochs=3, epoch_length=5)
def _test_gradients_on_resume(
dirname, device, with_dropout=True, with_dataaugs=True, data_size=24, batch_size=4, save_iter=None, save_epoch=None
):
debug = False
def random_train_data_loader(size):
d = AugmentedData(torch.rand(size, 3, 32, 32), enabled=with_dataaugs)
return DataLoader(d, batch_size=batch_size, shuffle=True, num_workers=2)
def _train(save_iter=None, save_epoch=None, sd=None):
w_norms = []
grad_norms = []
data = []
chkpt = []
manual_seed(12)
arch = [
nn.Conv2d(3, 10, 3),
nn.ReLU(),
nn.Conv2d(10, 10, 3),
nn.ReLU(),
nn.AdaptiveAvgPool2d(1),
nn.Flatten(),
nn.Linear(10, 5),
nn.ReLU(),
nn.Linear(5, 2),
]
if with_dropout:
arch.insert(2, nn.Dropout2d())
arch.insert(-2, nn.Dropout())
model = nn.Sequential(*arch).to(device)
opt = SGD(model.parameters(), lr=0.001)
def proc_fn(e, b):
from ignite.engine.deterministic import _get_rng_states, _repr_rng_state
s = _repr_rng_state(_get_rng_states())
model.train()
opt.zero_grad()
y = model(b.to(device))
y.sum().backward()
opt.step()
if debug:
print(
trainer.state.iteration, trainer.state.epoch, "proc_fn - b.shape", b.shape, torch.norm(y).item(), s
)
trainer = DeterministicEngine(proc_fn)
if save_iter is not None:
ev = Events.ITERATION_COMPLETED(once=save_iter)
elif save_epoch is not None:
ev = Events.EPOCH_COMPLETED(once=save_epoch)
save_iter = save_epoch * (data_size // batch_size)
@trainer.on(ev)
def save_chkpt(_):
if debug:
print(trainer.state.iteration, "save_chkpt")
fp = os.path.join(dirname, "test.pt")
from ignite.engine.deterministic import _repr_rng_state
tsd = trainer.state_dict()
if debug:
print("->", _repr_rng_state(tsd["rng_states"]))
torch.save([model.state_dict(), opt.state_dict(), tsd], fp)
chkpt.append(fp)
def log_event_filter(_, event):
if (event // save_iter == 1) and 1 <= (event % save_iter) <= 5:
return True
return False
@trainer.on(Events.ITERATION_COMPLETED(event_filter=log_event_filter))
def write_data_grads_weights(e):
x = e.state.batch
i = e.state.iteration
data.append([i, x.mean().item(), x.std().item()])
total = [0.0, 0.0]
out1 = []
out2 = []
for p in model.parameters():
n1 = torch.norm(p).item()
n2 = torch.norm(p.grad).item()
out1.append(n1)
out2.append(n2)
total[0] += n1
total[1] += n2
w_norms.append([i, total[0]] + out1)
grad_norms.append([i, total[1]] + out2)
if sd is not None:
sd = torch.load(sd)
model.load_state_dict(sd[0])
opt.load_state_dict(sd[1])
from ignite.engine.deterministic import _repr_rng_state
if debug:
print("-->", _repr_rng_state(sd[2]["rng_states"]))
trainer.load_state_dict(sd[2])
manual_seed(32)
trainer.run(random_train_data_loader(size=data_size), max_epochs=5)
return {"sd": chkpt, "data": data, "grads": grad_norms, "weights": w_norms}
out_original = _train(save_iter=save_iter, save_epoch=save_epoch)
assert len(out_original["sd"]) > 0
out_resumed = _train(save_iter=save_iter, save_epoch=save_epoch, sd=out_original["sd"][0])
if debug:
print("Original:")
print(" data:", out_original["data"])
print("grads:", out_original["grads"])
print(" W:", out_original["weights"])
print("Resume:")
print(" data:", out_resumed["data"])
print("grads:", out_resumed["grads"])
print(" W:", out_resumed["weights"])
# check data:
for d1, d2 in zip(out_original["data"], out_resumed["data"]):
assert d1 == d2
# check grads:
for d1, d2 in zip(out_original["grads"], out_resumed["grads"]):
assert d1 == d2
# check weights:
for d1, d2 in zip(out_original["weights"], out_resumed["weights"]):
assert d1 == d2
def test_gradients_on_resume_cpu(dirname):
with pytest.raises(AssertionError):
_test_gradients_on_resume(dirname, "cpu", with_dataaugs=True, save_iter=25)
_test_gradients_on_resume(dirname, "cpu", with_dataaugs=False, save_iter=25)
# resume from epoch
_test_gradients_on_resume(dirname, "cpu", with_dataaugs=True, save_epoch=3)
_test_gradients_on_resume(dirname, "cpu", with_dataaugs=False, save_epoch=3)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_gradients_on_resume_on_cuda(dirname):
with pytest.raises(AssertionError):
_test_gradients_on_resume(dirname, "cuda", with_dataaugs=True, save_iter=25)
with pytest.raises(AssertionError):
_test_gradients_on_resume(dirname, "cuda", with_dataaugs=False, save_iter=25)
# resume from epoch
_test_gradients_on_resume(dirname, "cuda", with_dataaugs=True, save_epoch=3)
_test_gradients_on_resume(dirname, "cuda", with_dataaugs=False, save_epoch=3)
def test_engine_with_dataloader_no_auto_batching():
# tests https://github.com/pytorch/ignite/issues/941
data = torch.rand(64, 4, 10)
data_loader = DataLoader(
data, batch_size=None, sampler=BatchSampler(RandomSampler(data), batch_size=8, drop_last=True)
)
counter = [0]
def foo(e, b):
print(f"{e.state.epoch}-{e.state.iteration}: {b}")
counter[0] += 1
engine = DeterministicEngine(foo)
engine.run(data_loader, epoch_length=10, max_epochs=5)
assert counter[0] == 50
def test_run_finite_iterator_no_epoch_length():
# FR: https://github.com/pytorch/ignite/issues/871
unknown_size = 11
def finite_unk_size_data_iter():
for i in range(unknown_size):
yield i
bc = BatchChecker(data=list(range(unknown_size)))
engine = DeterministicEngine(lambda e, b: bc.check(b))
@engine.on(Events.DATALOADER_STOP_ITERATION)
def restart_iter():
engine.state.dataloader = finite_unk_size_data_iter()
data_iter = finite_unk_size_data_iter()
engine.run(data_iter, max_epochs=5)
assert engine.state.epoch == 5
assert engine.state.iteration == unknown_size * 5
class OldDataLoader(DataLoader):
def __init__(self, dl, *args, **kwargs):
self.dl = dl
self.sampler = self.dl.sampler
self.batch_sampler = self.dl.batch_sampler
def __len__(self):
return len(self.dl)
def __iter__(self):
return iter(self.dl)
def test_dataloader_no_dataset_kind():
# tests issue : https://github.com/pytorch/ignite/issues/1022
engine = DeterministicEngine(lambda e, b: None)
data = torch.randint(0, 1000, size=(100 * 4,))
dataloader = DataLoader(data, batch_size=4)
dataloader = OldDataLoader(dataloader)
engine.run(dataloader)
| [
"torch.nn.Linear",
"torch.utils.data.RandomSampler",
"torch.cuda.is_available",
"torch.load",
"torch.randint_like",
"torch.norm",
"torch.manual_seed",
"torch.randint",
"torch.utils.data.DataLoader",
"torch.nn.Flatten",
"torch.device",
"torch.nn.Sequential",
"torch.cuda.device_count",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.rand",
"torch.nn.Dropout",
"torch.arange",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.Dropout2d"
] | 1.3 | Juddd/ignite | 00a208a4e7a7783e9ddac18931085fca2f0dec47 |
1.3 | import os
import pytest
import torch
from ignite.distributed.comp_models import has_xla_support
if not has_xla_support:
pytest.skip("Skip if no XLA support", allow_module_level=True)
else:
from ignite.distributed.comp_models.xla import _XlaDistModel
@pytest.mark.tpu
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test__xla_model():
available_backends = _XlaDistModel.available_backends
assert "xla-tpu" in available_backends
with pytest.raises(ValueError, match=r"Backend should be one of"):
_XlaDistModel.create_from_backend("abc")
def _test_xla_spawn_fn(local_rank, world_size, device):
from ignite.distributed.utils import _model
assert isinstance(_model, _XlaDistModel), f"{type(_model)} vs _XlaDistModel"
assert _model.get_local_rank() == local_rank
assert _model.get_world_size() == world_size
d = _model.device()
assert isinstance(d, torch.device) and d.type == device
assert _model.get_rank() == local_rank
assert _model.get_nproc_per_node() == world_size
assert _model.get_node_rank() == 0
assert _model.get_nnodes() == 1
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test__xla_dist_model_spawn_one_proc():
try:
_XlaDistModel.spawn(
_test_xla_spawn_fn, args=(1, "xla"), kwargs_dict={}, nproc_per_node=1,
)
except SystemExit:
pass
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test__xla_dist_model_spawn_n_procs():
n = int(os.environ["NUM_TPU_WORKERS"])
try:
_XlaDistModel.spawn(
_test_xla_spawn_fn, args=(n, "xla"), kwargs_dict={}, nproc_per_node=n,
)
except SystemExit:
pass
def _assert_model(model, true_conf):
assert model.device() == true_conf["device"]
assert model.get_local_rank() == true_conf["local_rank"]
assert model.get_rank() == true_conf["rank"]
assert model.get_world_size() == true_conf["world_size"]
assert model.get_node_rank() == true_conf["node_index"]
assert model.get_nnodes() == true_conf["nnodes"]
assert model.get_nproc_per_node() == true_conf["nproc_per_node"]
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test__xla_dist_model_create_from_backend():
# without spawn
model = _XlaDistModel.create_from_backend("xla-tpu")
import torch_xla.core.xla_model as xm
_assert_model(
model,
{
"device": xm.xla_device(),
"local_rank": 0,
"rank": 0,
"world_size": 1,
"node_index": 0,
"nnodes": 1,
"nproc_per_node": 1,
},
)
model.finalize()
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test__xla_dist_model_create_from_context():
# without spawn
model = _XlaDistModel.create_from_context()
assert model.backend() == "xla-tpu"
import torch_xla.core.xla_model as xm
_assert_model(
model,
{
"device": xm.xla_device(),
"local_rank": 0,
"rank": 0,
"world_size": 1,
"node_index": 0,
"nnodes": 1,
"nproc_per_node": 1,
},
)
def _test__xla_dist_model_create_from_context_in_child_proc(index):
model = _XlaDistModel.create_from_context()
assert model.backend() == "xla-tpu"
import torch_xla.core.xla_model as xm
_assert_model(
model,
{
"device": xm.xla_device(),
"local_rank": index,
"rank": xm.get_ordinal(),
"world_size": xm.xrt_world_size(),
"node_index": 0,
"nnodes": 1,
"nproc_per_node": xm.xrt_world_size(),
},
)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test__xla_dist_model_create_from_context_in_child_proc(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test__xla_dist_model_create_from_context_in_child_proc, args=(), nprocs=n)
def main_fold(fold):
import time
import torch.nn as nn
import torch.optim as optim
import torch_xla.core.xla_model as xm
from ignite.engine import Engine
device = xm.xla_device(fold)
comp_model = _XlaDistModel.create_from_context()
assert comp_model.device() == device
model = nn.Linear(100, 10)
model.to(device) # Move model before creating optimizer
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
def training_step(engine, _):
data = torch.rand(4, 100, device=device)
model.train()
data = data.to(device)
optimizer.zero_grad()
output = model(data)
loss = output.sum()
loss.backward()
xm.optimizer_step(optimizer, barrier=True)
return loss.item()
trainer = Engine(training_step)
# THIS CAN BE A CAUSE OF CRASH if DEVICE is OTHER THAN device
tensor = torch.tensor([fold + 1.0], dtype=torch.float).to(comp_model.device())
xm.all_reduce("max", [tensor,])
time.sleep(0.01 * fold)
trainer.run([0] * 100, max_epochs=2)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test__xla_dist_model_run_parallel_n_threads_without_sync():
# tests issue : https://github.com/pytorch/ignite/issues/1096
import torch_xla.core.xla_model as xm
from joblib import Parallel, delayed
devices = xm.get_xla_supported_devices()
folds = 1
d = 0
if len(devices) > 5:
folds = 5
d = 1
Parallel(n_jobs=folds, backend="threading")(delayed(main_fold)(i + d) for i in range(folds))
| [
"torch.nn.Linear",
"torch.rand",
"torch.tensor"
] | 1.3 | Juddd/ignite | 00a208a4e7a7783e9ddac18931085fca2f0dec47 |
1.3 | import torch
import math
from . import Encoder, Decoder, STFTFB # noqa
from .stft_fb import perfect_synthesis_window
from . import transforms
from ..dsp.consistency import mixture_consistency
def griffin_lim(mag_specgram, stft_enc, angles=None, istft_dec=None, n_iter=6, momentum=0.9):
"""Estimates matching phase from magnitude spectogram using the
'fast' Griffin Lim algorithm [1].
Args:
mag_specgram (torch.Tensor): (any, dim, ension, freq, frames) as
returned by `Encoder(STFTFB)`, the magnitude spectrogram to be
inverted.
stft_enc (Encoder[STFTFB]): The `Encoder(STFTFB())` object that was
used to compute the input `mag_specgram`.
angles (None or Tensor): Angles to use to initialize the algorithm.
If None (default), angles are initialized from a uniform distribution.
istft_dec (None or Decoder[STFTFB]): Optional Decoder to use to get
back to the time domain. If None (default), a perfect
reconstruction Decoder is built from `stft_enc`.
n_iter (int): Number of griffin-lim iterations to run.
momentum (float): The momentum of fast Griffin-Lim. Original
Griffin-Lim is obtained for momentum=0.
Returns:
torch.Tensor: estimated waveforms of shape (any, dim, ension, time).
Examples
>>> stft = Encoder(STFTFB(n_filters=256, kernel_size=256, stride=128))
>>> wav = torch.randn(2, 1, 8000)
>>> spec = stft(wav)
>>> masked_spec = spec * torch.sigmoid(torch.randn_like(spec))
>>> mag = transforms.mag(masked_spec, -2)
>>> est_wav = griffin_lim(mag, stft, n_iter=32)
References
- [1] Perraudin et al. "A fast Griffin-Lim algorithm," WASPAA 2013.
- [2] D. W. Griffin and J. S. Lim: "Signal estimation from modified
short-time Fourier transform," ASSP 1984.
"""
# We can create perfect iSTFT from STFT Encoder
if istft_dec is None:
# Compute window for perfect resynthesis
syn_win = perfect_synthesis_window(stft_enc.filterbank.window, stft_enc.stride)
istft_dec = Decoder(STFTFB(**stft_enc.get_config(), window=syn_win))
# If no initial phase is provided, initialize uniformly
if angles is None:
angles = 2 * math.pi * torch.rand_like(mag_specgram, device=mag_specgram.device)
else:
angles = angles.view(*mag_specgram.shape)
# Initialize rebuilt (useful to use momentum)
rebuilt = 0.0
for _ in range(n_iter):
prev_built = rebuilt
# Go to the time domain
complex_specgram = transforms.from_magphase(mag_specgram, angles)
waveform = istft_dec(complex_specgram)
# And back to TF domain
rebuilt = stft_enc(waveform)
# Update phase estimates (with momentum)
diff = rebuilt - momentum / (1 + momentum) * prev_built
angles = transforms.angle(diff)
final_complex_spec = transforms.from_magphase(mag_specgram, angles)
return istft_dec(final_complex_spec)
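# Added note (illustrative): with momentum=0 the loop above is the classical
# Griffin-Lim update, angles = angle(stft_enc(istft_dec(mag * exp(i*angles)))).
# The momentum term implements the "fast" variant of [1] by extrapolating the
# rebuilt spectrogram: diff = rebuilt - momentum / (1 + momentum) * prev_built.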
def misi(
mixture_wav,
mag_specgrams,
stft_enc,
angles=None,
istft_dec=None,
n_iter=6,
momentum=0.0,
src_weights=None,
dim=1,
):
"""Jointly estimates matching phase from magnitude spectograms using the
Multiple Input Spectrogram Inversion (MISI) algorithm [1].
Args:
mixture_wav (torch.Tensor): (batch, time)
mag_specgrams (torch.Tensor): (batch, n_src, freq, frames) as
returned by `Encoder(STFTFB)`, the magnitude spectrograms to be
jointly inverted using MISI (modified or not).
stft_enc (Encoder[STFTFB]): The `Encoder(STFTFB())` object that was
used to compute the input `mag_specgrams`.
angles (None or Tensor): Angles to use to initialize the algorithm.
If None (default), angles are initialized from a uniform distribution.
istft_dec (None or Decoder[STFTFB]): Optional Decoder to use to get
back to the time domain. If None (default), a perfect
reconstruction Decoder is built from `stft_enc`.
n_iter (int): Number of MISI iterations to run.
momentum (float): Momentum on updates (this argument comes from
GriffinLim). Defaults to 0 as it was never proposed anywhere.
src_weights (None or torch.Tensor): Consistency weight for each source.
Shape needs to be broadcastable to `istft_dec(mag_specgrams)`.
We make sure that the weights sum up to 1 along dim `dim`.
If `src_weights` is None, compute them based on relative power.
dim (int): Axis which contains the sources in `mag_specgrams`.
Used for consistency constraint.
Returns:
torch.Tensor: estimated waveforms of shape (batch, n_src, time).
Examples
>>> stft = Encoder(STFTFB(n_filters=256, kernel_size=256, stride=128))
>>> wav = torch.randn(2, 3, 8000)
>>> specs = stft(wav)
>>> masked_specs = specs * torch.sigmoid(torch.randn_like(specs))
>>> mag = transforms.mag(masked_specs, -2)
>>> est_wav = misi(wav.sum(1), mag, stft, n_iter=32)
References
[1] Gunawan and Sen, "Iterative Phase Estimation for the Synthesis of
Separated Sources From Single-Channel Mixtures," in IEEE Signal
Processing Letters, 2010.
[2] Wang, LeRoux et al. “End-to-End Speech Separation with Unfolded
Iterative Phase Reconstruction.” Interspeech 2018 (2018)
"""
# We can create perfect iSTFT from STFT Encoder
if istft_dec is None:
# Compute window for perfect resynthesis
syn_win = perfect_synthesis_window(stft_enc.filterbank.window, stft_enc.stride)
istft_dec = Decoder(STFTFB(**stft_enc.get_config(), window=syn_win))
# If no initial phase is provided, initialize uniformly
if angles is None:
angles = 2 * math.pi * torch.rand_like(mag_specgrams, device=mag_specgrams.device)
# wav_dim is used in mixture_consistency.
# Transform spec src dim to wav src dim for positive and negative dim
wav_dim = dim if dim >= 0 else dim + 1
# We forward/backward the mixture through STFT to have matching shapes
# with the input spectrograms as well as account for potential modulations
# if the window were not chosen to enable perfect reconstruction.
mixture_wav = istft_dec(stft_enc(mixture_wav))
# Initialize rebuilt (useful to use momentum)
rebuilt = 0.0
for _ in range(n_iter):
prev_built = rebuilt
# Go to the time domain
complex_specgram = transforms.from_magphase(mag_specgrams, angles)
wavs = istft_dec(complex_specgram)
# Make wavs sum up to the mixture
consistent_wavs = mixture_consistency(
mixture_wav, wavs, src_weights=src_weights, dim=wav_dim
)
# Back to TF domain
rebuilt = stft_enc(consistent_wavs)
# Update phase estimates (with momentum). The momentum option is kept here
# because it was shown useful for Griffin-Lim and may also help MISI.
diff = rebuilt - momentum / (1 + momentum) * prev_built
angles = transforms.angle(diff)
# Final source estimates
final_complex_spec = transforms.from_magphase(mag_specgrams, angles)
return istft_dec(final_complex_spec)
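# Added note (illustrative): the mixture_consistency projection inside the loop
# above is what distinguishes MISI from running Griffin-Lim independently per
# source -- after every phase update the time-domain estimates are corrected so
# that they sum to the (re-encoded/decoded) mixture before returning to the TF
# domain.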
| [
"torch.rand_like"
] | 1.3.0 | mcernak/asteroid | ed25e166a3bd338547248938116ba614ecfa4b3e |
1.3 | import torch
from .. import complex_nn
from ..filterbanks.transforms import from_torchaudio
from ..masknn.recurrent import DCCRMaskNet
from .dcunet import BaseDCUNet
class DCCRNet(BaseDCUNet):
"""DCCRNet as proposed in [1].
Args:
architecture (str): The architecture to use, must be "DCCRN-CL".
stft_kernel_size (int): STFT frame length to use
stft_stride (int, optional): STFT hop length to use.
sample_rate (float): Sampling rate of the model.
masknet_kwargs (optional): Passed to :class:`DCCRMaskNet`
References
- [1] : "DCCRN: Deep Complex Convolution Recurrent Network for Phase-Aware Speech Enhancement",
Yanxin Hu et al. https://arxiv.org/abs/2008.00264
"""
masknet_class = DCCRMaskNet
def __init__(self, *args, stft_kernel_size=512, **masknet_kwargs):
masknet_kwargs.setdefault("n_freqs", stft_kernel_size // 2)
super().__init__(
*args,
stft_kernel_size=stft_kernel_size,
**masknet_kwargs,
)
def forward_encoder(self, wav):
tf_rep = self.encoder(wav)
# Remove Nyquist frequency bin
return complex_nn.as_torch_complex(tf_rep)[..., :-1, :]
def apply_masks(self, tf_rep, est_masks):
masked_tf_rep = est_masks * tf_rep.unsqueeze(1)
# Pad Nyquist frequency bin
return from_torchaudio(
torch.view_as_real(torch.nn.functional.pad(masked_tf_rep, (0, 0, 0, 1)))
)
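# Minimal usage sketch (added for illustration; the constructor arguments below
# are assumptions based on the class docstring, not verified against the repo):
# >>> model = DCCRNet("DCCRN-CL", stft_kernel_size=512)
# >>> wav = torch.randn(2, 1, 16000)
# >>> enhanced = model(wav)  # masks applied in the TF domain, waveform returned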
| [
"torch.nn.functional.pad"
] | 1.3.0 | mcernak/asteroid | ed25e166a3bd338547248938116ba614ecfa4b3e |
1.0 | """
Inference algorithms and utilities used in the RSA example models.
Adapted from: http://dippl.org/chapters/03-enumeration.html
"""
from __future__ import absolute_import, division, print_function
import collections
import six
import torch
from six.moves import queue
import pyro
import pyro.distributions as dist
import pyro.poutine as poutine
from pyro.infer.abstract_infer import TracePosterior
from pyro.poutine.runtime import NonlocalExit
if six.PY3:
import functools
else:
import functools32 as functools
def memoize(fn=None, **kwargs):
if fn is None:
return lambda _fn: memoize(_fn, **kwargs)
return functools.lru_cache(**kwargs)(fn)
def factor(name, value):
"""
Like factor in webPPL, adds a scalar weight to the log-probability of the trace
"""
value = value if torch.is_tensor(value) else torch.tensor(value)
d = dist.Bernoulli(logits=value)
pyro.sample(name, d, obs=torch.ones(value.size()))
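# Illustrative sketch (added; not part of the original example). `factor` is
# meant to be called inside a model to softly re-weight execution traces, e.g.:
def _example_model_with_factor():
    x = pyro.sample("x", dist.Bernoulli(0.5))
    # Up-weight traces in which x == 1 relative to those in which x == 0.
    factor("prefer_one", 2.0 * x)
    return x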
class HashingMarginal(dist.Distribution):
"""
:param trace_dist: a TracePosterior instance representing a Monte Carlo posterior
Marginal histogram distribution.
Turns a TracePosterior object into a Distribution
over the return values of the TracePosterior's model.
"""
def __init__(self, trace_dist, sites=None):
assert isinstance(trace_dist, TracePosterior), \
"trace_dist must be trace posterior distribution object"
if sites is None:
sites = "_RETURN"
assert isinstance(sites, (str, list)), \
"sites must be either '_RETURN' or list"
self.sites = sites
super(HashingMarginal, self).__init__()
self.trace_dist = trace_dist
has_enumerate_support = True
@memoize(maxsize=10)
def _dist_and_values(self):
# XXX currently this whole object is very inefficient
values_map, logits = collections.OrderedDict(), collections.OrderedDict()
for tr, logit in zip(self.trace_dist.exec_traces,
self.trace_dist.log_weights):
if isinstance(self.sites, str):
value = tr.nodes[self.sites]["value"]
else:
value = {site: tr.nodes[site]["value"] for site in self.sites}
if not torch.is_tensor(logit):
logit = torch.tensor(logit)
if torch.is_tensor(value):
value_hash = hash(value.cpu().contiguous().numpy().tobytes())
elif isinstance(value, dict):
value_hash = hash(self._dict_to_tuple(value))
else:
value_hash = hash(value)
if value_hash in logits:
# Value has already been seen.
logits[value_hash] = dist.util.logsumexp(torch.stack([logits[value_hash], logit]), dim=-1)
else:
logits[value_hash] = logit
values_map[value_hash] = value
logits = torch.stack(list(logits.values())).contiguous().view(-1)
logits = logits - dist.util.logsumexp(logits, dim=-1)
d = dist.Categorical(logits=logits)
return d, values_map
def sample(self):
d, values_map = self._dist_and_values()
ix = d.sample()
return list(values_map.values())[ix]
def log_prob(self, val):
d, values_map = self._dist_and_values()
if torch.is_tensor(val):
value_hash = hash(val.cpu().contiguous().numpy().tobytes())
elif isinstance(val, dict):
value_hash = hash(self._dict_to_tuple(val))
else:
value_hash = hash(val)
return d.log_prob(torch.tensor([list(values_map.keys()).index(value_hash)]))
def enumerate_support(self):
d, values_map = self._dist_and_values()
return list(values_map.values())[:]
def _dict_to_tuple(self, d):
"""
Recursively converts a dictionary to a tuple of key-value tuples.
Only intended for use as a helper function inside HashingMarginal!!
May break when keys can't be sorted, but that is not an expected use case.
"""
if isinstance(d, dict):
return tuple([(k, self._dict_to_tuple(d[k])) for k in sorted(d.keys())])
else:
return d
def _weighted_mean(self, value, dim=0):
weights = self._log_weights.reshape([-1] + (value.dim() - 1) * [1])
max_weight = weights.max(dim=dim)[0]
relative_probs = (weights - max_weight).exp()
return (value * relative_probs).sum(dim=dim) / relative_probs.sum(dim=dim)
@property
def mean(self):
samples = torch.stack(list(self._dist_and_values()[1].values()))
return self._weighted_mean(samples)
@property
def variance(self):
samples = torch.stack(list(self._dist_and_values()[1].values()))
deviation_squared = torch.pow(samples - self.mean, 2)
return self._weighted_mean(deviation_squared)
########################
# Exact Search inference
########################
class Search(TracePosterior):
"""
Exact inference by enumerating over all possible executions
"""
def __init__(self, model, max_tries=int(1e6), **kwargs):
self.model = model
self.max_tries = max_tries
super(Search, self).__init__(**kwargs)
def _traces(self, *args, **kwargs):
q = queue.Queue()
q.put(poutine.Trace())
p = poutine.trace(
poutine.queue(self.model, queue=q, max_tries=self.max_tries))
while not q.empty():
tr = p.get_trace(*args, **kwargs)
yield tr, tr.log_prob_sum()
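# Illustrative sketch (added; not part of the original file): the RSA examples
# typically combine Search and HashingMarginal to obtain an exact, enumerated
# distribution over a model's return value:
def _enumerated_marginal_example():
    def model():
        x = pyro.sample("x", dist.Bernoulli(0.3))
        factor("prefer_one", 2.0 * x)
        return x
    marginal = HashingMarginal(Search(model).run())
    return marginal.enumerate_support(), marginal.sample()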
###############################################
# Best-first Search Inference
###############################################
def pqueue(fn, queue):
def sample_escape(tr, site):
return (site["name"] not in tr) and \
(site["type"] == "sample") and \
(not site["is_observed"])
def _fn(*args, **kwargs):
for i in range(int(1e6)):
assert not queue.empty(), \
"trying to get() from an empty queue will deadlock"
priority, next_trace = queue.get()
try:
ftr = poutine.trace(poutine.escape(poutine.replay(fn, next_trace),
functools.partial(sample_escape,
next_trace)))
return ftr(*args, **kwargs)
except NonlocalExit as site_container:
site_container.reset_stack()
for tr in poutine.util.enum_extend(ftr.trace.copy(),
site_container.site):
# add a little bit of noise to the priority to break ties...
queue.put((tr.log_prob_sum().item() - torch.rand(1).item() * 1e-2, tr))
raise ValueError("max tries ({}) exceeded".format(str(1e6)))
return _fn
class BestFirstSearch(TracePosterior):
"""
Inference by enumerating executions ordered by their probabilities.
Exact (and results equivalent to Search) if all executions are enumerated.
"""
def __init__(self, model, num_samples=None, **kwargs):
if num_samples is None:
num_samples = 100
self.num_samples = num_samples
self.model = model
super(BestFirstSearch, self).__init__(**kwargs)
def _traces(self, *args, **kwargs):
q = queue.PriorityQueue()
# add a little bit of noise to the priority to break ties...
q.put((torch.zeros(1).item() - torch.rand(1).item() * 1e-2, poutine.Trace()))
q_fn = pqueue(self.model, queue=q)
for i in range(self.num_samples):
if q.empty():
# num_samples was too large!
break
tr = poutine.trace(q_fn).get_trace(*args, **kwargs) # XXX should block
yield tr, tr.log_prob_sum()
| [
"torch.zeros",
"torch.rand",
"torch.stack",
"torch.is_tensor",
"torch.tensor",
"torch.pow"
] | 1.0.0 | gavincangan/pyro | d9115a6da7edd7e3fecd6b89a850cc137d7e7e9a |
1.0 | from __future__ import absolute_import, division, print_function
import torch
import torch.nn as nn
from torch.distributions import constraints
from pyro.distributions.torch_transform import TransformModule
from pyro.distributions.util import copy_docs_from
# This helper function clamps gradients but still passes through the gradient in clamped regions
def clamp_preserve_gradients(x, min, max):
return x + (x.clamp(min, max) - x).detach()
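# Added note (illustrative): the helper above is a "straight-through" clamp --
# the forward value is clamped to [min, max], but because the correction term
# is detached, gradients flow through x unchanged even inside clamped regions.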
@copy_docs_from(TransformModule)
class InverseAutoregressiveFlow(TransformModule):
"""
An implementation of Inverse Autoregressive Flow, using Eq (10) from Kingma Et Al., 2016,
:math:`\\mathbf{y} = \\mu_t + \\sigma_t\\odot\\mathbf{x}`
where :math:`\\mathbf{x}` are the inputs, :math:`\\mathbf{y}` are the outputs, :math:`\\mu_t,\\sigma_t`
are calculated from an autoregressive network on :math:`\\mathbf{x}`, and :math:`\\sigma_t>0`.
Together with `TransformedDistribution` this provides a way to create richer variational approximations.
Example usage:
>>> from pyro.nn import AutoRegressiveNN
>>> base_dist = dist.Normal(torch.zeros(10), torch.ones(10))
>>> iaf = InverseAutoregressiveFlow(AutoRegressiveNN(10, [40]))
>>> pyro.module("my_iaf", iaf) # doctest: +SKIP
>>> iaf_dist = dist.TransformedDistribution(base_dist, [iaf])
>>> iaf_dist.sample() # doctest: +SKIP
tensor([-0.4071, -0.5030, 0.7924, -0.2366, -0.2387, -0.1417, 0.0868,
0.1389, -0.4629, 0.0986])
The inverse of the Bijector is required when, e.g., scoring the log density of a sample with
`TransformedDistribution`. This implementation caches the inverse of the Bijector when its forward
operation is called, e.g., when sampling from `TransformedDistribution`. However, if the cached value
isn't available, either because it was overwritten while sampling a new value or because an arbitrary value is
being scored, it will calculate it manually. Note that this is an operation that scales as O(D) where D is
the input dimension, and so should be avoided for large dimensional uses. So in general, it is cheap
to sample from IAF and score a value that was sampled by IAF, but expensive to score an arbitrary value.
:param autoregressive_nn: an autoregressive neural network whose forward call returns a real-valued
mean and logit-scale as a tuple
:type autoregressive_nn: nn.Module
:param log_scale_min_clip: The minimum value for clipping the log(scale) from the autoregressive NN
:type log_scale_min_clip: float
:param log_scale_max_clip: The maximum value for clipping the log(scale) from the autoregressive NN
:type log_scale_max_clip: float
References:
1. Improving Variational Inference with Inverse Autoregressive Flow [arXiv:1606.04934]
Diederik P. Kingma, Tim Salimans, Rafal Jozefowicz, Xi Chen, Ilya Sutskever, Max Welling
2. Variational Inference with Normalizing Flows [arXiv:1505.05770]
Danilo Jimenez Rezende, Shakir Mohamed
3. MADE: Masked Autoencoder for Distribution Estimation [arXiv:1502.03509]
Mathieu Germain, Karol Gregor, Iain Murray, Hugo Larochelle
"""
domain = constraints.real
codomain = constraints.real
bijective = True
event_dim = 1
def __init__(self, autoregressive_nn, log_scale_min_clip=-5., log_scale_max_clip=3.):
super(InverseAutoregressiveFlow, self).__init__(cache_size=1)
self.arn = autoregressive_nn
self._cached_log_scale = None
self.log_scale_min_clip = log_scale_min_clip
self.log_scale_max_clip = log_scale_max_clip
def _call(self, x):
"""
:param x: the input into the bijection
:type x: torch.Tensor
Invokes the bijection x=>y; in the prototypical context of a TransformedDistribution `x` is a
sample from the base distribution (or the output of a previous flow)
"""
mean, log_scale = self.arn(x)
log_scale = clamp_preserve_gradients(log_scale, self.log_scale_min_clip, self.log_scale_max_clip)
self._cached_log_scale = log_scale
scale = torch.exp(log_scale)
y = scale * x + mean
return y
def _inverse(self, y):
"""
:param y: the output of the bijection
:type y: torch.Tensor
Inverts y => x. Uses a previously cached inverse if available, otherwise performs the inversion afresh.
"""
x_size = y.size()[:-1]
perm = self.arn.permutation
input_dim = y.size(-1)
x = [torch.zeros(x_size, device=y.device)] * input_dim
# NOTE: Inversion is an expensive operation that scales in the dimension of the input
for idx in perm:
mean, log_scale = self.arn(torch.stack(x, dim=-1))
inverse_scale = torch.exp(-clamp_preserve_gradients(
log_scale[..., idx], min=self.log_scale_min_clip, max=self.log_scale_max_clip))
mean = mean[..., idx]
x[idx] = (y[..., idx] - mean) * inverse_scale
x = torch.stack(x, dim=-1)
log_scale = clamp_preserve_gradients(log_scale, min=self.log_scale_min_clip, max=self.log_scale_max_clip)
self._cached_log_scale = log_scale
return x
def log_abs_det_jacobian(self, x, y):
"""
Calculates the elementwise determinant of the log jacobian
"""
if self._cached_log_scale is not None:
log_scale = self._cached_log_scale
else:
_, log_scale = self.arn(x)
log_scale = clamp_preserve_gradients(log_scale, self.log_scale_min_clip, self.log_scale_max_clip)
return log_scale.sum(-1)
@copy_docs_from(TransformModule)
class InverseAutoregressiveFlowStable(TransformModule):
"""
An implementation of an Inverse Autoregressive Flow, using Eqs (13)/(14) from Kingma Et Al., 2016,
:math:`\\mathbf{y} = \\sigma_t\\odot\\mathbf{x} + (1-\\sigma_t)\\odot\\mu_t`
where :math:`\\mathbf{x}` are the inputs, :math:`\\mathbf{y}` are the outputs, :math:`\\mu_t,\\sigma_t`
are calculated from an autoregressive network on :math:`\\mathbf{x}`, and :math:`\\sigma_t` is
restricted to :math:`(0,1)`.
This variant of IAF is claimed by the authors to be more numerically stable than one using Eq (10),
although in practice it leads to a restriction on the distributions that can be represented,
presumably since the input is restricted to rescaling by a number on :math:`(0,1)`.
Example usage:
>>> from pyro.nn import AutoRegressiveNN
>>> base_dist = dist.Normal(torch.zeros(10), torch.ones(10))
>>> iaf = InverseAutoregressiveFlowStable(AutoRegressiveNN(10, [40]))
>>> iaf_module = pyro.module("my_iaf", iaf)
>>> iaf_dist = dist.TransformedDistribution(base_dist, [iaf])
>>> iaf_dist.sample() # doctest: +SKIP
tensor([-0.4071, -0.5030, 0.7924, -0.2366, -0.2387, -0.1417, 0.0868,
0.1389, -0.4629, 0.0986])
See `InverseAutoregressiveFlow` docs for a discussion of the running cost.
:param autoregressive_nn: an autoregressive neural network whose forward call returns a real-valued
mean and logit-scale as a tuple
:type autoregressive_nn: nn.Module
:param sigmoid_bias: bias on the hidden units fed into the sigmoid; default=`2.0`
:type sigmoid_bias: float
References:
1. Improving Variational Inference with Inverse Autoregressive Flow [arXiv:1606.04934]
Diederik P. Kingma, Tim Salimans, Rafal Jozefowicz, Xi Chen, Ilya Sutskever, Max Welling
2. Variational Inference with Normalizing Flows [arXiv:1505.05770]
Danilo Jimenez Rezende, Shakir Mohamed
3. MADE: Masked Autoencoder for Distribution Estimation [arXiv:1502.03509]
Mathieu Germain, Karol Gregor, Iain Murray, Hugo Larochelle
"""
domain = constraints.real
codomain = constraints.real
bijective = True
event_dim = 1
def __init__(self, autoregressive_nn, sigmoid_bias=2.0):
super(InverseAutoregressiveFlowStable, self).__init__(cache_size=1)
self.arn = autoregressive_nn
self.sigmoid = nn.Sigmoid()
self.logsigmoid = nn.LogSigmoid()
self.sigmoid_bias = sigmoid_bias
self._cached_log_scale = None
def _call(self, x):
"""
:param x: the input into the bijection
:type x: torch.Tensor
Invokes the bijection x=>y; in the prototypical context of a TransformedDistribution `x` is a
sample from the base distribution (or the output of a previous flow)
"""
mean, logit_scale = self.arn(x)
logit_scale = logit_scale + self.sigmoid_bias
scale = self.sigmoid(logit_scale)
log_scale = self.logsigmoid(logit_scale)
self._cached_log_scale = log_scale
y = scale * x + (1 - scale) * mean
return y
def _inverse(self, y):
"""
:param y: the output of the bijection
:type y: torch.Tensor
Inverts y => x.
"""
x_size = y.size()[:-1]
perm = self.arn.permutation
input_dim = y.size(-1)
x = [torch.zeros(x_size, device=y.device)] * input_dim
# NOTE: Inversion is an expensive operation that scales in the dimension of the input
for idx in perm:
mean, logit_scale = self.arn(torch.stack(x, dim=-1))
inverse_scale = 1 + torch.exp(-logit_scale[..., idx] - self.sigmoid_bias)
x[idx] = inverse_scale * y[..., idx] + (1 - inverse_scale) * mean[..., idx]
self._cached_log_scale = inverse_scale
x = torch.stack(x, dim=-1)
return x
def log_abs_det_jacobian(self, x, y):
"""
Calculates the elementwise determinant of the log jacobian
"""
if self._cached_log_scale is not None:
log_scale = self._cached_log_scale
else:
_, logit_scale = self.arn(x)
log_scale = self.logsigmoid(logit_scale + self.sigmoid_bias)
return log_scale.sum(-1)
| [
"torch.zeros",
"torch.stack",
"torch.nn.Sigmoid",
"torch.nn.LogSigmoid",
"torch.exp"
] | 1.0.0 | gavincangan/pyro | d9115a6da7edd7e3fecd6b89a850cc137d7e7e9a |
1.0 | from __future__ import absolute_import, division, print_function
import logging
from collections import defaultdict, namedtuple
import pytest
import torch
import pyro.distributions as dist
from pyro.contrib.gp.kernels import Cosine, Matern32, RBF, WhiteNoise
from pyro.contrib.gp.likelihoods import Gaussian
from pyro.contrib.gp.models import (GPLVM, GPRegression, SparseGPRegression,
VariationalGP, VariationalSparseGP)
from pyro.contrib.gp.util import train
from pyro.infer.mcmc.hmc import HMC
from pyro.infer.mcmc.mcmc import MCMC
from tests.common import assert_equal
logger = logging.getLogger(__name__)
T = namedtuple("TestGPModel", ["model_class", "X", "y", "kernel", "likelihood"])
X = torch.tensor([[1., 5., 3.], [4., 3., 7.]])
y1D = torch.tensor([2., 1.])
y2D = torch.tensor([[1., 2.], [3., 3.], [1., 4.], [-1., 1.]])
noise = torch.tensor(1e-7)
def _kernel():
return RBF(input_dim=3, variance=torch.tensor(3.), lengthscale=torch.tensor(2.))
def _likelihood():
return Gaussian(torch.tensor(1e-7))
def _TEST_CASES():
TEST_CASES = [
T(
GPRegression,
X, y1D, _kernel(), noise
),
T(
GPRegression,
X, y2D, _kernel(), noise
),
T(
SparseGPRegression,
X, y1D, _kernel(), noise
),
T(
SparseGPRegression,
X, y2D, _kernel(), noise
),
T(
VariationalGP,
X, y1D, _kernel(), _likelihood()
),
T(
VariationalGP,
X, y2D, _kernel(), _likelihood()
),
T(
VariationalSparseGP,
X, y1D, _kernel(), _likelihood()
),
T(
VariationalSparseGP,
X, y2D, _kernel(), _likelihood()
),
]
return TEST_CASES
TEST_IDS = [t[0].__name__ + "_y{}D".format(str(t[2].dim()))
for t in _TEST_CASES()]
@pytest.mark.parametrize("model_class, X, y, kernel, likelihood", _TEST_CASES(), ids=TEST_IDS)
def test_model(model_class, X, y, kernel, likelihood):
if model_class is SparseGPRegression or model_class is VariationalSparseGP:
gp = model_class(X, None, kernel, X, likelihood)
else:
gp = model_class(X, None, kernel, likelihood)
loc, var = gp.model()
if model_class is VariationalGP or model_class is VariationalSparseGP:
assert_equal(loc.norm().item(), 0)
assert_equal(var, torch.ones(var.shape[-1]).expand(var.shape))
else:
assert_equal(loc.norm().item(), 0)
assert_equal(var, kernel(X).diag())
@pytest.mark.parametrize("model_class, X, y, kernel, likelihood", _TEST_CASES(), ids=TEST_IDS)
def test_forward(model_class, X, y, kernel, likelihood):
if model_class is SparseGPRegression or model_class is VariationalSparseGP:
gp = model_class(X, y, kernel, X, likelihood)
else:
gp = model_class(X, y, kernel, likelihood)
# test shape
Xnew = torch.tensor([[2.0, 3.0, 1.0]])
loc0, cov0 = gp(Xnew, full_cov=True)
loc1, var1 = gp(Xnew, full_cov=False)
assert loc0.dim() == y.dim()
assert loc0.shape[-1] == Xnew.shape[0]
# test latent shape
assert loc0.shape[:-1] == y.shape[:-1]
assert cov0.shape[:-2] == y.shape[:-1]
assert cov0.shape[-1] == cov0.shape[-2]
assert cov0.shape[-1] == Xnew.shape[0]
assert_equal(loc0, loc1)
n = Xnew.shape[0]
cov0_diag = torch.stack([mat.diag() for mat in cov0.view(-1, n, n)]).reshape(var1.shape)
assert_equal(cov0_diag, var1)
# test trivial forward: Xnew = X
loc, cov = gp(X, full_cov=True)
if model_class is VariationalGP or model_class is VariationalSparseGP:
assert_equal(loc.norm().item(), 0)
assert_equal(cov, torch.eye(cov.shape[-1]).expand(cov.shape))
else:
assert_equal(loc, y)
assert_equal(cov.norm().item(), 0)
# test same input forward: Xnew[0,:] = Xnew[1,:] = ...
Xnew = torch.tensor([[2.0, 3.0, 1.0]]).expand(10, 3)
loc, cov = gp(Xnew, full_cov=True)
loc_diff = loc - loc[..., :1].expand(y.shape[:-1] + (10,))
assert_equal(loc_diff.norm().item(), 0)
cov_diff = cov - cov[..., :1, :1].expand(y.shape[:-1] + (10, 10))
assert_equal(cov_diff.norm().item(), 0)
# test noise kernel forward: kernel = WhiteNoise
gp.kernel = WhiteNoise(input_dim=3, variance=torch.tensor(10.))
loc, cov = gp(X, full_cov=True)
assert_equal(loc.norm().item(), 0)
assert_equal(cov, torch.eye(cov.shape[-1]).expand(cov.shape) * 10)
@pytest.mark.parametrize("model_class, X, y, kernel, likelihood", _TEST_CASES(), ids=TEST_IDS)
def test_forward_with_empty_latent_shape(model_class, X, y, kernel, likelihood):
# regression models don't use latent_shape, no need for test
if model_class is GPRegression or model_class is SparseGPRegression:
return
elif model_class is VariationalGP:
gp = model_class(X, y, kernel, likelihood, latent_shape=torch.Size([]))
else: # model_class is VariationalSparseGP
gp = model_class(X, y, kernel, X, likelihood, latent_shape=torch.Size([]))
# test shape
Xnew = torch.tensor([[2.0, 3.0, 1.0]])
loc0, cov0 = gp(Xnew, full_cov=True)
loc1, var1 = gp(Xnew, full_cov=False)
assert loc0.shape[-1] == Xnew.shape[0]
assert cov0.shape[-1] == cov0.shape[-2]
assert cov0.shape[-1] == Xnew.shape[0]
# test latent shape
assert loc0.shape[:-1] == torch.Size([])
assert cov0.shape[:-2] == torch.Size([])
assert_equal(loc0, loc1)
assert_equal(cov0.diag(), var1)
@pytest.mark.parametrize("model_class, X, y, kernel, likelihood", _TEST_CASES(), ids=TEST_IDS)
@pytest.mark.init(rng_seed=0)
def test_inference(model_class, X, y, kernel, likelihood):
# skip variational GP models because variance/lengthscale highly
# depend on variational parameters
if model_class is VariationalGP or model_class is VariationalSparseGP:
return
elif model_class is GPRegression:
gp = model_class(X, y, RBF(input_dim=3), likelihood)
else: # model_class is SparseGPRegression
gp = model_class(X, y, RBF(input_dim=3), X, likelihood)
# fix inducing points because variance/lengthscale highly depend on it
gp.Xu.requires_grad_(False)
generator = dist.MultivariateNormal(torch.zeros(X.shape[0]), kernel(X))
target_y = generator(sample_shape=torch.Size([1000])).detach()
gp.set_data(X, target_y)
train(gp)
y_cov = gp.kernel(X)
target_y_cov = kernel(X)
assert_equal(y_cov, target_y_cov, prec=0.1)
@pytest.mark.init(rng_seed=0)
def test_inference_sgpr():
N = 1000
X = dist.Uniform(torch.zeros(N), torch.ones(N)*5).sample()
y = 0.5 * torch.sin(3*X) + dist.Normal(torch.zeros(N), torch.ones(N)*0.5).sample()
kernel = RBF(input_dim=1)
Xu = torch.arange(0., 5.5, 0.5)
sgpr = SparseGPRegression(X, y, kernel, Xu)
train(sgpr)
Xnew = torch.arange(0., 5.05, 0.05)
loc, var = sgpr(Xnew, full_cov=False)
target = 0.5 * torch.sin(3*Xnew)
assert_equal((loc - target).abs().mean().item(), 0, prec=0.07)
@pytest.mark.init(rng_seed=0)
def test_inference_vsgp():
N = 1000
X = dist.Uniform(torch.zeros(N), torch.ones(N)*5).sample()
y = 0.5 * torch.sin(3*X) + dist.Normal(torch.zeros(N), torch.ones(N)*0.5).sample()
kernel = RBF(input_dim=1)
Xu = torch.arange(0., 5.5, 0.5)
vsgp = VariationalSparseGP(X, y, kernel, Xu, Gaussian())
optimizer = torch.optim.Adam(vsgp.parameters(), lr=0.03)
train(vsgp, optimizer)
Xnew = torch.arange(0., 5.05, 0.05)
loc, var = vsgp(Xnew, full_cov=False)
target = 0.5 * torch.sin(3*Xnew)
assert_equal((loc - target).abs().mean().item(), 0, prec=0.06)
@pytest.mark.init(rng_seed=0)
def test_inference_whiten_vsgp():
N = 1000
X = dist.Uniform(torch.zeros(N), torch.ones(N)*5).sample()
y = 0.5 * torch.sin(3*X) + dist.Normal(torch.zeros(N), torch.ones(N)*0.5).sample()
kernel = RBF(input_dim=1)
Xu = torch.arange(0., 5.5, 0.5)
vsgp = VariationalSparseGP(X, y, kernel, Xu, Gaussian(), whiten=True)
train(vsgp)
Xnew = torch.arange(0., 5.05, 0.05)
loc, var = vsgp(Xnew, full_cov=False)
target = 0.5 * torch.sin(3*Xnew)
assert_equal((loc - target).abs().mean().item(), 0, prec=0.07)
@pytest.mark.parametrize("model_class, X, y, kernel, likelihood", _TEST_CASES(), ids=TEST_IDS)
def test_inference_with_empty_latent_shape(model_class, X, y, kernel, likelihood):
# regression models don't use latent_shape (default=torch.Size([]))
if model_class is GPRegression or model_class is SparseGPRegression:
return
elif model_class is VariationalGP:
gp = model_class(X, y, kernel, likelihood, latent_shape=torch.Size([]))
else: # model_class is SparseVariationalGP
gp = model_class(X, y, kernel, X.clone(), likelihood, latent_shape=torch.Size([]))
train(gp, num_steps=1)
@pytest.mark.parametrize("model_class, X, y, kernel, likelihood", _TEST_CASES(), ids=TEST_IDS)
def test_inference_with_whiten(model_class, X, y, kernel, likelihood):
# regression models don't use whiten
if model_class is GPRegression or model_class is SparseGPRegression:
return
elif model_class is VariationalGP:
gp = model_class(X, y, kernel, likelihood, whiten=True)
else: # model_class is SparseVariationalGP
gp = model_class(X, y, kernel, X.clone(), likelihood, whiten=True)
train(gp, num_steps=1)
@pytest.mark.parametrize("model_class, X, y, kernel, likelihood", _TEST_CASES(), ids=TEST_IDS)
def test_hmc(model_class, X, y, kernel, likelihood):
if model_class is SparseGPRegression or model_class is VariationalSparseGP:
gp = model_class(X, y, kernel, X.clone(), likelihood)
else:
gp = model_class(X, y, kernel, likelihood)
if model_class is GPRegression:
model_name = "GPR"
elif model_class is SparseGPRegression:
model_name = "SGPR"
elif model_class is VariationalGP:
model_name = "VGP"
else:
model_name = "VSGP"
kernel.set_prior("variance", dist.Uniform(torch.tensor(0.5), torch.tensor(1.5)))
kernel.set_prior("lengthscale", dist.Uniform(torch.tensor(1.0), torch.tensor(3.0)))
hmc_kernel = HMC(gp.model, step_size=1)
mcmc_run = MCMC(hmc_kernel, num_samples=10)
post_trace = defaultdict(list)
for trace, _ in mcmc_run._traces():
variance_name = "{}/RBF/variance".format(model_name)
post_trace["variance"].append(trace.nodes[variance_name]["value"])
lengthscale_name = "{}/RBF/lengthscale".format(model_name)
post_trace["lengthscale"].append(trace.nodes[lengthscale_name]["value"])
if model_class is VariationalGP:
f_name = "VGP/f"
post_trace["f"].append(trace.nodes[f_name]["value"])
if model_class is VariationalSparseGP:
u_name = "VSGP/u"
post_trace["u"].append(trace.nodes[u_name]["value"])
for param in post_trace:
param_mean = torch.mean(torch.stack(post_trace[param]), 0)
logger.info("Posterior mean - {}".format(param))
logger.info(param_mean)
def test_inference_deepGP():
gp1 = GPRegression(X, None, RBF(input_dim=3, variance=torch.tensor(3.),
lengthscale=torch.tensor(2.)))
Z, _ = gp1.model()
gp2 = VariationalSparseGP(Z, y2D, Matern32(input_dim=3), Z.clone(),
Gaussian(torch.tensor(1e-6)))
class DeepGP(torch.nn.Module):
def __init__(self, gp1, gp2):
super(DeepGP, self).__init__()
self.gp1 = gp1
self.gp2 = gp2
def model(self):
Z, _ = self.gp1.model()
self.gp2.set_data(Z, y2D)
self.gp2.model()
def guide(self):
self.gp1.guide()
self.gp2.guide()
deepgp = DeepGP(gp1, gp2)
train(deepgp, num_steps=1)
@pytest.mark.parametrize("model_class, X, y, kernel, likelihood", _TEST_CASES(), ids=TEST_IDS)
def test_gplvm(model_class, X, y, kernel, likelihood):
if model_class is SparseGPRegression or model_class is VariationalSparseGP:
gp = model_class(X, y, kernel, X.clone(), likelihood)
else:
gp = model_class(X, y, kernel, likelihood)
gplvm = GPLVM(gp)
# test inference
train(gplvm, num_steps=1)
# test forward
gplvm(Xnew=X)
def _pre_test_mean_function():
def f(x):
return 2 * x + 3 + 5 * torch.sin(7 * x)
X = torch.arange(100, dtype=torch.Tensor().dtype)
y = f(X)
Xnew = torch.arange(100, 150, dtype=torch.Tensor().dtype)
ynew = f(Xnew)
kernel = Cosine(input_dim=1)
class Trend(torch.nn.Module):
def __init__(self):
super(Trend, self).__init__()
self.a = torch.nn.Parameter(torch.tensor(0.))
self.b = torch.nn.Parameter(torch.tensor(1.))
def forward(self, x):
return self.a * x + self.b
trend = Trend()
return X, y, Xnew, ynew, kernel, trend
def _mape(y_true, y_pred):
return ((y_pred - y_true) / y_true).abs().mean()
def _post_test_mean_function(gpmodule, Xnew, y_true):
assert_equal(gpmodule.mean_function.a.item(), 2, prec=0.02)
assert_equal(gpmodule.mean_function.b.item(), 3, prec=0.02)
y_pred, _ = gpmodule(Xnew)
assert_equal(_mape(y_true, y_pred).item(), 0, prec=0.02)
def test_mean_function_GPR():
X, y, Xnew, ynew, kernel, mean_fn = _pre_test_mean_function()
gpmodule = GPRegression(X, y, kernel, mean_function=mean_fn)
train(gpmodule)
_post_test_mean_function(gpmodule, Xnew, ynew)
def test_mean_function_SGPR():
X, y, Xnew, ynew, kernel, mean_fn = _pre_test_mean_function()
Xu = X[::20].clone()
gpmodule = SparseGPRegression(X, y, kernel, Xu, mean_function=mean_fn)
train(gpmodule)
_post_test_mean_function(gpmodule, Xnew, ynew)
def test_mean_function_SGPR_DTC():
X, y, Xnew, ynew, kernel, mean_fn = _pre_test_mean_function()
Xu = X[::20].clone()
gpmodule = SparseGPRegression(X, y, kernel, Xu, mean_function=mean_fn, approx="DTC")
train(gpmodule)
_post_test_mean_function(gpmodule, Xnew, ynew)
def test_mean_function_SGPR_FITC():
X, y, Xnew, ynew, kernel, mean_fn = _pre_test_mean_function()
Xu = X[::20].clone()
gpmodule = SparseGPRegression(X, y, kernel, Xu, mean_function=mean_fn, approx="FITC")
train(gpmodule)
_post_test_mean_function(gpmodule, Xnew, ynew)
def test_mean_function_VGP():
X, y, Xnew, ynew, kernel, mean_fn = _pre_test_mean_function()
likelihood = Gaussian()
gpmodule = VariationalGP(X, y, kernel, likelihood, mean_function=mean_fn)
train(gpmodule)
_post_test_mean_function(gpmodule, Xnew, ynew)
def test_mean_function_VGP_whiten():
X, y, Xnew, ynew, kernel, mean_fn = _pre_test_mean_function()
likelihood = Gaussian()
gpmodule = VariationalGP(X, y, kernel, likelihood, mean_function=mean_fn,
whiten=True)
optimizer = torch.optim.Adam(gpmodule.parameters(), lr=0.1)
train(gpmodule, optimizer)
_post_test_mean_function(gpmodule, Xnew, ynew)
def test_mean_function_VSGP():
X, y, Xnew, ynew, kernel, mean_fn = _pre_test_mean_function()
Xu = X[::20].clone()
likelihood = Gaussian()
gpmodule = VariationalSparseGP(X, y, kernel, Xu, likelihood, mean_function=mean_fn)
optimizer = torch.optim.Adam(gpmodule.parameters(), lr=0.02)
train(gpmodule, optimizer)
_post_test_mean_function(gpmodule, Xnew, ynew)
def test_mean_function_VSGP_whiten():
X, y, Xnew, ynew, kernel, mean_fn = _pre_test_mean_function()
Xu = X[::20].clone()
likelihood = Gaussian()
gpmodule = VariationalSparseGP(X, y, kernel, Xu, likelihood, mean_function=mean_fn,
whiten=True)
optimizer = torch.optim.Adam(gpmodule.parameters(), lr=0.1)
train(gpmodule, optimizer)
_post_test_mean_function(gpmodule, Xnew, ynew)
| [
"torch.Size",
"torch.zeros",
"torch.stack",
"torch.sin",
"torch.arange",
"torch.ones",
"torch.tensor",
"torch.eye",
"torch.Tensor"
] | 1.0.0 | gavincangan/pyro | d9115a6da7edd7e3fecd6b89a850cc137d7e7e9a |
1.0 | from __future__ import absolute_import, division, print_function
import math
import torch
import pyro
from pyro import poutine
from pyro.contrib.autoguide import mean_field_guide_entropy
from pyro.contrib.oed.search import Search
from pyro.contrib.util import lexpand
from pyro.infer import EmpiricalMarginal, Importance, SVI
from pyro.util import torch_isnan, torch_isinf
def vi_ape(model, design, observation_labels, target_labels,
vi_parameters, is_parameters, y_dist=None):
"""Estimates the average posterior entropy (APE) loss function using
variational inference (VI).
The APE loss function estimated by this method is defined as
:math:`APE(d)=E_{Y\\sim p(y|\\theta, d)}[H(p(\\theta|Y, d))]`
where :math:`H[p(x)]` is the `differential entropy
<https://en.wikipedia.org/wiki/Differential_entropy>`_.
The APE is related to expected information gain (EIG) by the equation
:math:`EIG(d)=H[p(\\theta)]-APE(d)`
in particular, minimising the APE is equivalent to maximising EIG.
:param function model: A pyro model accepting `design` as only argument.
:param torch.Tensor design: Tensor representation of design
:param list observation_labels: A subset of the sample sites
present in `model`. These sites are regarded as future observations
and other sites are regarded as latent variables over which a
posterior is to be inferred.
:param list target_labels: A subset of the sample sites over which the posterior
entropy is to be measured.
:param dict vi_parameters: Variational inference parameters which should include:
`optim`: an instance of :class:`pyro.Optim`, `guide`: a guide function
compatible with `model`, `num_steps`: the number of VI steps to make,
and `loss`: the loss function to use for VI
:param dict is_parameters: Importance sampling parameters for the
marginal distribution of :math:`Y`. May include `num_samples`: the number
of samples to draw from the marginal.
:param pyro.distributions.Distribution y_dist: (optional) the distribution
assumed for the response variable :math:`Y`
:return: Loss function estimate
:rtype: `torch.Tensor`
"""
if isinstance(observation_labels, str):
observation_labels = [observation_labels]
if target_labels is not None and isinstance(target_labels, str):
target_labels = [target_labels]
def posterior_entropy(y_dist, design):
# Important that y_dist is sampled *within* the function
y = pyro.sample("conditioning_y", y_dist)
y_dict = {label: y[i, ...] for i, label in enumerate(observation_labels)}
conditioned_model = pyro.condition(model, data=y_dict)
SVI(conditioned_model, **vi_parameters).run(design)
# Recover the entropy
return mean_field_guide_entropy(vi_parameters["guide"], [design], whitelist=target_labels)
if y_dist is None:
y_dist = EmpiricalMarginal(Importance(model, **is_parameters).run(design),
sites=observation_labels)
# Calculate the expected posterior entropy under this distn of y
loss_dist = EmpiricalMarginal(Search(posterior_entropy).run(y_dist, design))
loss = loss_dist.mean
return loss
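# Hypothetical usage sketch (added for illustration; the concrete model, design
# and guide names are assumptions). Per the docstring, `vi_parameters` bundles
# the guide, optimiser, loss and number of VI steps:
# >>> vi_parameters = {"guide": my_guide, "optim": pyro.optim.Adam({"lr": 0.01}),
# ...                  "loss": pyro.infer.Trace_ELBO(), "num_steps": 500}
# >>> ape = vi_ape(model, design, observation_labels="y", target_labels="theta",
# ...              vi_parameters=vi_parameters, is_parameters={"num_samples": 10})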
def naive_rainforth_eig(model, design, observation_labels, target_labels=None,
N=100, M=10, M_prime=None):
"""
Naive Rainforth (i.e. Nested Monte Carlo) estimate of the expected information
gain (EIG). The estimate is
.. math::
\\frac{1}{N}\\sum_{n=1}^N \\log p(y_n | \\theta_n, d) -
\\log \\left(\\frac{1}{M}\\sum_{m=1}^M p(y_n | \\theta_m, d)\\right)
Monte Carlo estimation is attempted for the :math:`\\log p(y | \\theta, d)` term if
the parameter `M_prime` is passed. Otherwise, it is assumed that that :math:`\\log p(y | \\theta, d)`
can safely be read from the model itself.
:param function model: A pyro model accepting `design` as only argument.
:param torch.Tensor design: Tensor representation of design
:param list observation_labels: A subset of the sample sites
present in `model`. These sites are regarded as future observations
and other sites are regarded as latent variables over which a
posterior is to be inferred.
:param list target_labels: A subset of the sample sites over which the posterior
entropy is to be measured.
:param int N: Number of outer expectation samples.
:param int M: Number of inner expectation samples for `p(y|d)`.
:param int M_prime: Number of samples for `p(y | theta, d)` if required.
:return: EIG estimate
:rtype: `torch.Tensor`
"""
if isinstance(observation_labels, str):
observation_labels = [observation_labels]
if isinstance(target_labels, str):
target_labels = [target_labels]
# Take N samples of the model
expanded_design = lexpand(design, N)
trace = poutine.trace(model).get_trace(expanded_design)
trace.compute_log_prob()
if M_prime is not None:
y_dict = {l: lexpand(trace.nodes[l]["value"], M_prime) for l in observation_labels}
theta_dict = {l: lexpand(trace.nodes[l]["value"], M_prime) for l in target_labels}
theta_dict.update(y_dict)
# Resample M values of u and compute conditional probabilities
conditional_model = pyro.condition(model, data=theta_dict)
# Not acceptable to use (M_prime, 1) here - other variables may occur after
# theta, so need to be sampled conditional upon it
reexpanded_design = lexpand(design, M_prime, N)
retrace = poutine.trace(conditional_model).get_trace(reexpanded_design)
retrace.compute_log_prob()
conditional_lp = sum(retrace.nodes[l]["log_prob"] for l in observation_labels).logsumexp(0) \
- math.log(M_prime)
else:
# This assumes that y are independent conditional on theta
# Furthermore assume that there are no other variables besides theta
conditional_lp = sum(trace.nodes[l]["log_prob"] for l in observation_labels)
y_dict = {l: lexpand(trace.nodes[l]["value"], M) for l in observation_labels}
# Resample M values of theta and compute conditional probabilities
conditional_model = pyro.condition(model, data=y_dict)
# Using (M, 1) instead of (M, N) - acceptable to re-use thetas between ys because
# theta comes before y in graphical model
reexpanded_design = lexpand(design, M, 1)
retrace = poutine.trace(conditional_model).get_trace(reexpanded_design)
retrace.compute_log_prob()
marginal_lp = sum(retrace.nodes[l]["log_prob"] for l in observation_labels).logsumexp(0) \
- math.log(M)
return (conditional_lp - marginal_lp).sum(0)/N
def donsker_varadhan_eig(model, design, observation_labels, target_labels,
num_samples, num_steps, T, optim, return_history=False,
final_design=None, final_num_samples=None):
"""
Donsker-Varadhan estimate of the expected information gain (EIG).
The Donsker-Varadhan representation of EIG is
.. math::
\\sup_T E_{p(y, \\theta | d)}[T(y, \\theta)] - \\log E_{p(y|d)p(\\theta)}[\\exp(T(\\bar{y}, \\bar{\\theta}))]
where :math:`T` is any (measurable) function.
    This method optimises the loss function over a pre-specified class of
functions `T`.
:param function model: A pyro model accepting `design` as only argument.
:param torch.Tensor design: Tensor representation of design
:param list observation_labels: A subset of the sample sites
present in `model`. These sites are regarded as future observations
and other sites are regarded as latent variables over which a
posterior is to be inferred.
:param list target_labels: A subset of the sample sites over which the posterior
entropy is to be measured.
:param int num_samples: Number of samples per iteration.
:param int num_steps: Number of optimisation steps.
:param function or torch.nn.Module T: optimisable function `T` for use in the
Donsker-Varadhan loss function.
:param pyro.optim.Optim optim: Optimiser to use.
:param bool return_history: If `True`, also returns a tensor giving the loss function
at each step of the optimisation.
:param torch.Tensor final_design: The final design tensor to evaluate at. If `None`, uses
`design`.
    :param int final_num_samples: The number of samples to use at the final evaluation. If `None`,
        uses `num_samples`.
    :return: EIG estimate, optionally includes full optimisation history
:rtype: `torch.Tensor` or `tuple`
"""
if isinstance(observation_labels, str):
observation_labels = [observation_labels]
if isinstance(target_labels, str):
target_labels = [target_labels]
loss = donsker_varadhan_loss(model, T, observation_labels, target_labels)
return opt_eig_ape_loss(design, loss, num_samples, num_steps, optim, return_history,
final_design, final_num_samples)
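# Hedged sketch of a critic `T` (illustrative only; not part of the original
# module). Any callable T(design, trace, observation_labels, target_labels)
# returning a per-sample score can be used; the skeleton below assumes
# scalar-valued "y" and "theta" sites and uses hypothetical names throughout.
#
#     class ToyCritic(torch.nn.Module):
#         def __init__(self):
#             super().__init__()
#             self.net = torch.nn.Sequential(
#                 torch.nn.Linear(2, 32), torch.nn.ReLU(), torch.nn.Linear(32, 1))
#
#         def forward(self, design, trace, observation_labels, target_labels):
#             y = trace.nodes[observation_labels[0]]["value"].unsqueeze(-1)
#             theta = trace.nodes[target_labels[0]]["value"].unsqueeze(-1)
#             return self.net(torch.cat([y, theta], dim=-1)).squeeze(-1)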
def barber_agakov_ape(model, design, observation_labels, target_labels,
num_samples, num_steps, guide, optim, return_history=False,
final_design=None, final_num_samples=None):
"""
Barber-Agakov estimate of average posterior entropy (APE).
The Barber-Agakov representation of APE is
    :math:`\\sup_{q}E_{p(y, \\theta | d)}[\\log q(\\theta | y, d)]`
where :math:`q` is any distribution on :math:`\\theta`.
This method optimises the loss over a given guide family `guide`
representing :math:`q`.
:param function model: A pyro model accepting `design` as only argument.
:param torch.Tensor design: Tensor representation of design
:param list observation_labels: A subset of the sample sites
present in `model`. These sites are regarded as future observations
and other sites are regarded as latent variables over which a
posterior is to be inferred.
:param list target_labels: A subset of the sample sites over which the posterior
entropy is to be measured.
:param int num_samples: Number of samples per iteration.
:param int num_steps: Number of optimisation steps.
:param function guide: guide family for use in the (implicit) posterior estimation.
The parameters of `guide` are optimised to maximise the Barber-Agakov
objective.
:param pyro.optim.Optim optim: Optimiser to use.
:param bool return_history: If `True`, also returns a tensor giving the loss function
at each step of the optimisation.
:param torch.Tensor final_design: The final design tensor to evaluate at. If `None`, uses
`design`.
    :param int final_num_samples: The number of samples to use at the final evaluation. If `None`,
        uses `num_samples`.
    :return: APE estimate, optionally includes full optimisation history
:rtype: `torch.Tensor` or `tuple`
"""
if isinstance(observation_labels, str):
observation_labels = [observation_labels]
if isinstance(target_labels, str):
target_labels = [target_labels]
loss = barber_agakov_loss(model, guide, observation_labels, target_labels)
return opt_eig_ape_loss(design, loss, num_samples, num_steps, optim, return_history,
final_design, final_num_samples)
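# Hedged sketch of a `guide` for the Barber-Agakov bound (illustrative only; not
# part of the original module). The guide is called as
# guide(y_dict, design, observation_labels, target_labels) and must contain a
# pyro.sample statement for every target site so that its log-probability can be
# scored under pyro.condition; all names below are hypothetical and
# `import pyro.distributions as dist` is assumed.
#
#     def toy_guide(y_dict, design, observation_labels, target_labels):
#         y = y_dict[observation_labels[0]]
#         w = pyro.param("regression_weight", torch.ones(1))
#         scale = pyro.param("posterior_scale", torch.ones(1),
#                            constraint=torch.distributions.constraints.positive)
#         pyro.sample(target_labels[0], dist.Normal(w * y, scale).to_event(1))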
def opt_eig_ape_loss(design, loss_fn, num_samples, num_steps, optim, return_history=False,
final_design=None, final_num_samples=None):
if final_design is None:
final_design = design
if final_num_samples is None:
final_num_samples = num_samples
params = None
history = []
for step in range(num_steps):
if params is not None:
pyro.infer.util.zero_grads(params)
agg_loss, loss = loss_fn(design, num_samples)
agg_loss.backward()
if return_history:
history.append(loss)
params = [value.unconstrained()
for value in pyro.get_param_store().values()]
optim(params)
_, loss = loss_fn(final_design, final_num_samples)
if return_history:
return torch.stack(history), loss
else:
return loss
def donsker_varadhan_loss(model, T, observation_labels, target_labels):
ewma_log = EwmaLog(alpha=0.90)
try:
pyro.module("T", T)
except AssertionError:
pass
def loss_fn(design, num_particles):
expanded_design = lexpand(design, num_particles)
# Unshuffled data
unshuffled_trace = poutine.trace(model).get_trace(expanded_design)
y_dict = {l: unshuffled_trace.nodes[l]["value"] for l in observation_labels}
# Shuffled data
# Not actually shuffling, resimulate for safety
conditional_model = pyro.condition(model, data=y_dict)
shuffled_trace = poutine.trace(conditional_model).get_trace(expanded_design)
T_joint = T(expanded_design, unshuffled_trace, observation_labels,
target_labels)
T_independent = T(expanded_design, shuffled_trace, observation_labels,
target_labels)
joint_expectation = T_joint.sum(0)/num_particles
A = T_independent - math.log(num_particles)
s, _ = torch.max(A, dim=0)
independent_expectation = s + ewma_log((A - s).exp().sum(dim=0), s)
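        # (Illustrative note, not in the original) The three lines above form a
        # numerically stable log-mean-exp over the shuffled samples: subtracting
        # the per-batch max `s` before exponentiating avoids overflow, and
        # EwmaLog smooths the gradient of the log with a moving average of its
        # argument.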
loss = joint_expectation - independent_expectation
# Switch sign, sum over batch dimensions for scalar loss
agg_loss = -loss.sum()
return agg_loss, loss
return loss_fn
def barber_agakov_loss(model, guide, observation_labels, target_labels):
def loss_fn(design, num_particles):
expanded_design = lexpand(design, num_particles)
# Sample from p(y, theta | d)
trace = poutine.trace(model).get_trace(expanded_design)
y_dict = {l: trace.nodes[l]["value"] for l in observation_labels}
theta_dict = {l: trace.nodes[l]["value"] for l in target_labels}
# Run through q(theta | y, d)
conditional_guide = pyro.condition(guide, data=theta_dict)
cond_trace = poutine.trace(conditional_guide).get_trace(
y_dict, expanded_design, observation_labels, target_labels)
cond_trace.compute_log_prob()
loss = -sum(cond_trace.nodes[l]["log_prob"] for l in target_labels).sum(0)/num_particles
agg_loss = loss.sum()
return agg_loss, loss
return loss_fn
class _EwmaLogFn(torch.autograd.Function):
@staticmethod
def forward(ctx, input, ewma):
ctx.save_for_backward(ewma)
return input.log()
@staticmethod
def backward(ctx, grad_output):
ewma, = ctx.saved_tensors
return grad_output / ewma, None
_ewma_log_fn = _EwmaLogFn.apply
class EwmaLog(object):
"""Logarithm function with exponentially weighted moving average
for gradients.
    For input `inputs` this function returns :code:`inputs.log()`. However, it
computes the gradient as
:math:`\\frac{\\sum_{t=0}^{T-1} \\alpha^t}{\\sum_{t=0}^{T-1} \\alpha^t x_{T-t}}`
where :math:`x_t` are historical input values passed to this function,
:math:`x_T` being the most recently seen value.
    This gradient may help with numerical stability when the inputs to the
    function form a convergent sequence.
"""
def __init__(self, alpha):
self.alpha = alpha
self.ewma = 0.
self.n = 0
self.s = 0.
def __call__(self, inputs, s, dim=0, keepdim=False):
"""Updates the moving average, and returns :code:`inputs.log()`.
"""
self.n += 1
if torch_isnan(self.ewma) or torch_isinf(self.ewma):
ewma = inputs
else:
ewma = inputs * (1. - self.alpha) / (1 - self.alpha**self.n) \
+ torch.exp(self.s - s) * self.ewma \
* (self.alpha - self.alpha**self.n) / (1 - self.alpha**self.n)
self.ewma = ewma.detach()
self.s = s.detach()
return _ewma_log_fn(inputs, ewma)
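# Illustrative note (not in the original): the forward value is always
# inputs.log(); only the backward pass is modified, dividing the incoming
# gradient by the moving average instead of by the current input. With
# alpha = 0.9 and two calls, the most recent and previous inputs receive
# weights of roughly 0.53 and 0.47 ((1-a)/(1-a^2) and a(1-a)/(1-a^2)).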
| [
"torch.stack",
"torch.exp",
"torch.max"
] | 1.0.0 | gavincangan/pyro | d9115a6da7edd7e3fecd6b89a850cc137d7e7e9a |
1.8 | # -*- encoding: utf-8 -*-
# -----
# Created Date: 2021/7/16
# Author: Hanjing Wang
# -----
# Last Modified:
# Modified By:
# -----
# Copyright (c) 2020 MARL @ SJTU
# -----
import os
import ray
import copy
import pytest
import torch
import time
from malib.backend.datapool.parameter_server import (
Parameter,
    ParameterDescription,
ParameterServer,
PARAMETER_TABLE_NAME_GEN,
)
class MLP(torch.nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.layers = torch.nn.Sequential(
torch.nn.Linear(in_channels, 64),
torch.nn.ReLU(inplace=True),
torch.nn.Linear(64, 64),
torch.nn.ReLU(inplace=True),
torch.nn.Linear(64, out_channels),
)
for p in self.layers.parameters():
torch.nn.init.normal_(p)
def forward(self, x):
return self.layers(x)
def test_dump_and_load():
mlp1 = MLP(in_channels=10, out_channels=20)
mlp2 = MLP(in_channels=15, out_channels=20)
x1 = torch.rand(size=(16, 10))
x2 = torch.rand(size=(16, 15))
with torch.no_grad():
y1 = mlp1(x1)
y2 = mlp2(x2)
exp_cfg = {"group": "test_parameter_server", "name": "dump_and_load"}
# dump
ray.init(address=None)
parameter_server_config = {
# configuration for dumping parameters at /tmp/
"quit_job": {
"dump_when_closed": True,
# must ended with slash to indicate it is a directory
"path": "/tmp/test_ps/",
}
}
parameter_server = ParameterServer.options(
name="ParameterServer", max_concurrency=1000
).remote(test_mode=True, **parameter_server_config, exp_cfg=exp_cfg)
param_desc1 = ParameterDescription(
time_stamp=time.time(),
identify="test_agent_1",
env_id="test_env",
id="mlp1",
type=ParameterDescription.Type.PARAMETER,
lock=False,
description={"registered_name": "MLP"},
data=None,
)
param_desc2 = copy.copy(param_desc1)
param_desc1.data = mlp1.state_dict()
expected_table_name1 = (
PARAMETER_TABLE_NAME_GEN(
env_id=param_desc1.env_id,
agent_id=param_desc1.identify,
pid=param_desc1.id,
policy_type=param_desc1.description["registered_name"],
)
+ ".pkl"
)
status = ray.get(parameter_server.push.remote(param_desc1))
print(status)
param_desc2.identify = "test_agent_2"
param_desc2.id = "mlp2"
param_desc2.data = mlp2.state_dict()
expected_table_name2 = (
PARAMETER_TABLE_NAME_GEN(
env_id=param_desc2.env_id,
agent_id=param_desc2.identify,
pid=param_desc2.id,
policy_type=param_desc2.description["registered_name"],
)
+ ".pkl"
)
status = ray.get(parameter_server.push.remote(param_desc2))
print(status)
# wait for the ps to dump the data
_ = ray.get(parameter_server.shutdown.remote())
parameter_server = None
# check the existence of dumped file
files = os.listdir(parameter_server_config["quit_job"]["path"])
assert expected_table_name1 in files
assert expected_table_name2 in files
parameter_server_config.update(
{
# load the dumped parameters
"init_job": {
"load_when_start": True,
"path": parameter_server_config["quit_job"]["path"],
},
# clean the properties of quitting schedule
"quit_job": {},
}
)
parameter_server = ParameterServer.options(
name="ParameterServerRec", max_concurrency=1000
).remote(test_mode=True, **parameter_server_config, exp_cfg=exp_cfg)
epsilon = 1e-8
# clean data
param_desc1.data = None
status, mlp1_param = ray.get(
parameter_server.pull.remote(param_desc1, keep_return=True)
)
assert mlp1_param.data
mlp1.load_state_dict(mlp1_param.data)
with torch.no_grad():
y1_rec = mlp1(x1)
res = torch.sub(y1, y1_rec)
assert torch.all(res < epsilon).item()
param_desc2.data = None
status, mlp2_param = ray.get(
parameter_server.pull.remote(param_desc2, keep_return=True)
)
mlp2.load_state_dict(mlp2_param.data)
with torch.no_grad():
y2_rec = mlp2(x2)
res = torch.sub(y2, y2_rec)
assert torch.all(res < epsilon).item()
_ = ray.get(parameter_server.shutdown.remote())
ray.shutdown()
| [
"torch.nn.Linear",
"torch.rand",
"torch.sub",
"torch.no_grad",
"torch.nn.ReLU",
"torch.nn.init.normal_",
"torch.all"
] | 1.8.1 | apexrl/malib | 3785309e9b695ff359131fbbecabb6b5a52ef559 |
1.4 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from pytorch_lightning import LightningDataModule, LightningModule, Trainer
from pytorch_lightning.metrics.functional import accuracy
from pytorch_lightning.utilities import DistributedType
from tests.helpers import BoringModel
from tests.helpers.utils import get_default_logger, load_model_from_checkpoint, reset_seed
def run_model_test_without_loggers(
trainer_options: dict, model: LightningModule, data: LightningDataModule = None, min_acc: float = 0.50
):
reset_seed()
# fit model
trainer = Trainer(**trainer_options)
trainer.fit(model, datamodule=data)
# correct result and ok accuracy
assert trainer.state.finished, f"Training failed with {trainer.state}"
model2 = load_model_from_checkpoint(trainer.logger, trainer.checkpoint_callback.best_model_path, type(model))
# test new model accuracy
test_loaders = model2.test_dataloader() if not data else data.test_dataloader()
if not isinstance(test_loaders, list):
test_loaders = [test_loaders]
if not isinstance(model2, BoringModel):
for dataloader in test_loaders:
run_prediction_eval_model_template(model2, dataloader, min_acc=min_acc)
def run_model_test(
trainer_options,
model: LightningModule,
data: LightningDataModule = None,
on_gpu: bool = True,
version=None,
with_hpc: bool = True,
min_acc: float = 0.25
):
reset_seed()
save_dir = trainer_options['default_root_dir']
# logger file to get meta
logger = get_default_logger(save_dir, version=version)
trainer_options.update(logger=logger)
trainer = Trainer(**trainer_options)
initial_values = torch.tensor([torch.sum(torch.abs(x)) for x in model.parameters()])
trainer.fit(model, datamodule=data)
post_train_values = torch.tensor([torch.sum(torch.abs(x)) for x in model.parameters()])
assert trainer.state.finished, f"Training failed with {trainer.state}"
# Check that the model is actually changed post-training
change_ratio = torch.norm(initial_values - post_train_values)
    assert change_ratio > 0.1, f"the model changed by only {change_ratio}"
# test model loading
pretrained_model = load_model_from_checkpoint(logger, trainer.checkpoint_callback.best_model_path, type(model))
# test new model accuracy
test_loaders = model.test_dataloader() if not data else data.test_dataloader()
if not isinstance(test_loaders, list):
test_loaders = [test_loaders]
if not isinstance(model, BoringModel):
for dataloader in test_loaders:
run_prediction_eval_model_template(model, dataloader, min_acc=min_acc)
if with_hpc:
if trainer._distrib_type in (DistributedType.DDP, DistributedType.DDP_SPAWN, DistributedType.DDP2):
# on hpc this would work fine... but need to hack it for the purpose of the test
trainer.optimizers, trainer.lr_schedulers, trainer.optimizer_frequencies = \
trainer.init_optimizers(pretrained_model)
# test HPC saving
trainer.checkpoint_connector.hpc_save(save_dir, logger)
# test HPC loading
checkpoint_path = trainer.checkpoint_connector.get_max_ckpt_path_from_folder(save_dir)
trainer.checkpoint_connector.hpc_load(checkpoint_path, on_gpu=on_gpu)
@torch.no_grad()
def run_prediction_eval_model_template(trained_model, dataloader, min_acc=0.50):
# run prediction on 1 batch
trained_model.cpu()
trained_model.eval()
batch = next(iter(dataloader))
x, y = batch
x = x.flatten(1)
y_hat = trained_model(x)
acc = accuracy(y_hat.cpu(), y.cpu(), top_k=2).item()
assert acc >= min_acc, f"This model is expected to get > {min_acc} in test set (it got {acc})"
| [
"torch.abs",
"torch.norm",
"torch.no_grad"
] | 1.4 | jbuckman/pytorch-lightning | cc74fb717a7127fecd4dbb9c743ba28b40de7f64 |
1.4 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from argparse import ArgumentParser
from unittest import mock
import pytest
import torch
from torch.utils.data import DataLoader
import tests.helpers.pipelines as tpipes
import tests.helpers.utils as tutils
from pytorch_lightning import Trainer
from pytorch_lightning.accelerators import TPUAccelerator
from pytorch_lightning.callbacks import EarlyStopping
from pytorch_lightning.core.step_result import Result
from pytorch_lightning.plugins import TPUSpawnPlugin
from pytorch_lightning.utilities import _TPU_AVAILABLE
from pytorch_lightning.utilities.distributed import ReduceOp
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.helpers import BoringModel, RandomDataset
from tests.helpers.runif import RunIf
from tests.helpers.utils import pl_multi_process_test
if _TPU_AVAILABLE:
import torch_xla
import torch_xla.distributed.xla_multiprocessing as xmp
SERIAL_EXEC = xmp.MpSerialExecutor()
_LARGER_DATASET = RandomDataset(32, 2000)
# 8 cores needs a big dataset
def _serial_train_loader():
return DataLoader(_LARGER_DATASET, batch_size=32)
class SerialLoaderBoringModel(BoringModel):
def train_dataloader(self):
return DataLoader(RandomDataset(32, 2000), batch_size=32)
def val_dataloader(self):
return DataLoader(RandomDataset(32, 2000), batch_size=32)
@RunIf(tpu=True)
@pl_multi_process_test
def test_model_tpu_cores_1(tmpdir):
"""Make sure model trains on TPU."""
tutils.reset_seed()
trainer_options = dict(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=2,
tpu_cores=1,
limit_train_batches=4,
limit_val_batches=4,
)
model = BoringModel()
tpipes.run_model_test(trainer_options, model, on_gpu=False, with_hpc=False)
@pytest.mark.parametrize('tpu_core', [1, 5])
@RunIf(tpu=True)
@pl_multi_process_test
def test_model_tpu_index(tmpdir, tpu_core):
"""Make sure model trains on TPU."""
tutils.reset_seed()
trainer_options = dict(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=2,
tpu_cores=[tpu_core],
limit_train_batches=4,
limit_val_batches=4,
)
model = BoringModel()
tpipes.run_model_test(trainer_options, model, on_gpu=False, with_hpc=False)
assert torch_xla._XLAC._xla_get_default_device() == f'xla:{tpu_core}'
@RunIf(tpu=True)
@pl_multi_process_test
def test_model_tpu_cores_8(tmpdir):
"""Make sure model trains on TPU."""
tutils.reset_seed()
trainer_options = dict(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=1,
tpu_cores=8,
limit_train_batches=4,
limit_val_batches=4,
)
# 8 cores needs a big dataset
model = SerialLoaderBoringModel()
tpipes.run_model_test(trainer_options, model, on_gpu=False, with_hpc=False, min_acc=0.05)
@RunIf(tpu=True)
@pl_multi_process_test
def test_model_16bit_tpu_cores_1(tmpdir):
"""Make sure model trains on TPU."""
tutils.reset_seed()
trainer_options = dict(
default_root_dir=tmpdir,
precision=16,
progress_bar_refresh_rate=0,
max_epochs=2,
tpu_cores=1,
limit_train_batches=8,
limit_val_batches=2,
)
model = BoringModel()
tpipes.run_model_test(trainer_options, model, on_gpu=False)
assert os.environ.get('XLA_USE_BF16') == str(1), "XLA_USE_BF16 was not set in environment variables"
@pytest.mark.parametrize('tpu_core', [1, 5])
@RunIf(tpu=True)
@pl_multi_process_test
def test_model_16bit_tpu_index(tmpdir, tpu_core):
"""Make sure model trains on TPU."""
tutils.reset_seed()
trainer_options = dict(
default_root_dir=tmpdir,
precision=16,
progress_bar_refresh_rate=0,
max_epochs=2,
tpu_cores=[tpu_core],
limit_train_batches=4,
limit_val_batches=2,
)
model = BoringModel()
tpipes.run_model_test(trainer_options, model, on_gpu=False)
assert torch_xla._XLAC._xla_get_default_device() == f'xla:{tpu_core}'
assert os.environ.get('XLA_USE_BF16') == str(1), "XLA_USE_BF16 was not set in environment variables"
@RunIf(tpu=True)
@pl_multi_process_test
def test_model_16bit_tpu_cores_8(tmpdir):
"""Make sure model trains on TPU."""
tutils.reset_seed()
trainer_options = dict(
default_root_dir=tmpdir,
precision=16,
progress_bar_refresh_rate=0,
max_epochs=1,
tpu_cores=8,
limit_train_batches=4,
limit_val_batches=4,
)
# 8 cores needs a big dataset
model = SerialLoaderBoringModel()
tpipes.run_model_test(trainer_options, model, on_gpu=False, with_hpc=False, min_acc=0.05)
@RunIf(tpu=True)
@pl_multi_process_test
def test_model_tpu_early_stop(tmpdir):
"""Test if single TPU core training works"""
class CustomBoringModel(BoringModel):
def validation_step(self, *args, **kwargs):
out = super().validation_step(*args, **kwargs)
self.log('val_loss', out['x'])
return out
tutils.reset_seed()
model = CustomBoringModel()
trainer = Trainer(
callbacks=[EarlyStopping(monitor='val_loss')],
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=2,
limit_train_batches=2,
limit_val_batches=2,
tpu_cores=8,
)
trainer.fit(model)
trainer.test(test_dataloaders=DataLoader(RandomDataset(32, 2000), batch_size=32))
@RunIf(tpu=True)
@pl_multi_process_test
def test_tpu_grad_norm(tmpdir):
"""Test if grad_norm works on TPU."""
tutils.reset_seed()
trainer_options = dict(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=4,
tpu_cores=1,
limit_train_batches=0.4,
limit_val_batches=0.4,
gradient_clip_val=0.5,
)
model = BoringModel()
tpipes.run_model_test(trainer_options, model, on_gpu=False, with_hpc=False)
@RunIf(tpu=True)
@pl_multi_process_test
def test_tpu_clip_grad_by_value(tmpdir):
"""Test if clip_gradients by value works on TPU"""
tutils.reset_seed()
trainer_options = dict(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=4,
tpu_cores=1,
limit_train_batches=10,
limit_val_batches=10,
gradient_clip_val=0.5,
gradient_clip_algorithm='value'
)
model = BoringModel()
tpipes.run_model_test(trainer_options, model, on_gpu=False, with_hpc=False)
@RunIf(tpu=True)
@pl_multi_process_test
def test_dataloaders_passed_to_fit(tmpdir):
"""Test if dataloaders passed to trainer works on TPU"""
tutils.reset_seed()
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
tpu_cores=8,
)
trainer.fit(
model,
train_dataloader=model.train_dataloader(),
val_dataloaders=model.val_dataloader(),
)
assert trainer.state.finished, f"Training failed with {trainer.state}"
@pytest.mark.parametrize(
['tpu_cores', 'expected_tpu_id'],
[pytest.param(1, None), pytest.param(8, None),
pytest.param([1], 1), pytest.param([8], 8)],
)
@RunIf(tpu=True)
def test_tpu_id_to_be_as_expected(tpu_cores, expected_tpu_id):
"""Test if trainer.tpu_id is set as expected"""
assert Trainer(tpu_cores=tpu_cores).accelerator_connector.tpu_id == expected_tpu_id
def test_tpu_misconfiguration():
"""Test if trainer.tpu_id is set as expected"""
with pytest.raises(MisconfigurationException, match="`tpu_cores` can only be"):
Trainer(tpu_cores=[1, 8])
@pytest.mark.skipif(_TPU_AVAILABLE, reason="test requires missing TPU")
def test_exception_when_no_tpu_found(tmpdir):
"""Test if exception is thrown when xla devices are not available"""
with pytest.raises(MisconfigurationException, match='No TPU devices were found.'):
Trainer(tpu_cores=8)
@pytest.mark.parametrize('tpu_cores', [1, 8, [1]])
@RunIf(tpu=True)
def test_distributed_backend_set_when_using_tpu(tmpdir, tpu_cores):
"""Test if distributed_backend is set to `tpu` when tpu_cores is not None"""
assert Trainer(tpu_cores=tpu_cores).distributed_backend == "tpu"
@RunIf(tpu=True)
@pl_multi_process_test
def test_broadcast_on_tpu():
""" Checks if an object from the master process is broadcasted to other processes correctly"""
def test_broadcast(rank):
trainer = Trainer(tpu_cores=8)
assert isinstance(trainer.accelerator, TPUAccelerator)
assert isinstance(trainer.training_type_plugin, TPUSpawnPlugin)
obj = ("ver_0.5", "logger_name", rank)
result = trainer.training_type_plugin.broadcast(obj)
assert result == ("ver_0.5", "logger_name", 0)
xmp.spawn(test_broadcast, nprocs=8, start_method='fork')
@pytest.mark.parametrize(
["tpu_cores", "expected_tpu_id", "error_expected"],
[
pytest.param(1, None, False),
pytest.param(8, None, False),
pytest.param([1], 1, False),
pytest.param([8], 8, False),
pytest.param("1,", 1, False),
pytest.param("1", None, False),
pytest.param("9, ", 9, True),
pytest.param([9], 9, True),
pytest.param([0], 0, True),
pytest.param(2, None, True),
pytest.param(10, None, True),
],
)
@RunIf(tpu=True)
@pl_multi_process_test
def test_tpu_choice(tmpdir, tpu_cores, expected_tpu_id, error_expected):
if error_expected:
with pytest.raises(MisconfigurationException, match=r".*tpu_cores` can only be 1, 8 or [<1-8>]*"):
Trainer(default_root_dir=tmpdir, tpu_cores=tpu_cores)
else:
trainer = Trainer(default_root_dir=tmpdir, tpu_cores=tpu_cores)
assert trainer.accelerator_connector.tpu_id == expected_tpu_id
@pytest.mark.parametrize(
['cli_args', 'expected'],
[pytest.param('--tpu_cores=8', {'tpu_cores': 8}),
pytest.param("--tpu_cores=1,", {'tpu_cores': '1,'})]
)
@RunIf(tpu=True)
@pl_multi_process_test
def test_tpu_cores_with_argparse(cli_args, expected):
"""Test passing tpu_cores in command line"""
cli_args = cli_args.split(' ') if cli_args else []
with mock.patch("argparse._sys.argv", ["any.py"] + cli_args):
parser = ArgumentParser(add_help=False)
parser = Trainer.add_argparse_args(parent_parser=parser)
args = Trainer.parse_argparser(parser)
for k, v in expected.items():
assert getattr(args, k) == v
assert Trainer.from_argparse_args(args)
@RunIf(tpu=True)
@pl_multi_process_test
def test_tpu_reduce():
"""Test tpu spawn reduce operation """
def test_reduce(rank):
trainer = Trainer(tpu_cores=8)
# faster this way
reduce_ops = ["mean", "AVG", "undefined", "sum", ReduceOp.SUM, ReduceOp.MAX]
for reduce_op in reduce_ops:
if reduce_op == "undefined" or reduce_op == ReduceOp.MAX:
with pytest.raises(MisconfigurationException, match="TPUSpawn TrainingTypePlugin only support"):
result = trainer.training_type_plugin.reduce(1, reduce_op)
else:
result = trainer.training_type_plugin.reduce(1, reduce_op)
if isinstance(reduce_op, str) and reduce_op.lower() in ("mean", "avg"):
assert result.item() == 1
else:
assert result.item() == 8
xmp.spawn(test_reduce, nprocs=8, start_method='fork')
@RunIf(tpu=True)
@pl_multi_process_test
@pytest.mark.parametrize("clip_val", [10])
@mock.patch("torch.nn.utils.clip_grad_norm_")
def test_tpu_precision_16_clip_gradients(mock_clip_grad_norm, clip_val, tmpdir):
"""
    Ensure that gradient clipping is only applied if the value is greater than 0.
TODO: Fix (test fails with parametrize)
"""
tutils.reset_seed()
trainer_options = dict(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=1,
tpu_cores=1,
precision=16,
limit_train_batches=4,
limit_val_batches=4,
gradient_clip_val=clip_val,
)
model = BoringModel()
tpipes.run_model_test(trainer_options, model, on_gpu=False, with_hpc=False)
if clip_val > 0:
mock_clip_grad_norm.assert_called()
else:
mock_clip_grad_norm.assert_not_called()
@RunIf(tpu=True)
@pl_multi_process_test
def test_if_test_works_with_checkpoint_false(tmpdir):
"""Ensure that model trains properly when `checkpoint_callback` is set to False."""
# Train a model on TPU
model = BoringModel()
trainer = Trainer(max_epochs=1, tpu_cores=8, default_root_dir=tmpdir, fast_dev_run=True, checkpoint_callback=False)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
@RunIf(tpu=True)
@pl_multi_process_test
def test_tpu_sync_dist():
"""Test tpu spawn sync dist operation """
def test_sync_dist(rank):
tensor = torch.tensor([1.0])
training_type_plugin = TPUSpawnPlugin()
res = Result()
res.log(
"test_tensor",
tensor,
sync_fn=training_type_plugin.reduce,
sync_dist=True,
sync_dist_op=torch.distributed.ReduceOp.SUM
)
assert res["test_tensor"].item() == 8, "Result-Log does not work properly with TPU Spawn and Tensors"
xmp.spawn(test_sync_dist, nprocs=8, start_method='fork')
@RunIf(tpu=True)
@pl_multi_process_test
def test_tpu_debug_mode(tmpdir):
"""Test if debug mode works on TPU."""
class DebugModel(BoringModel):
def on_train_start(self):
assert os.environ.get("PT_XLA_DEBUG") == str(1), "PT_XLA_DEBUG was not set in environment variables"
def teardown(self, stage):
assert "PT_XLA_DEBUG" not in os.environ
tutils.reset_seed()
trainer_options = dict(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=4,
tpu_cores=8,
limit_train_batches=0.4,
limit_val_batches=0.4,
plugins=TPUSpawnPlugin(debug=True),
)
model = DebugModel()
tpipes.run_model_test(trainer_options, model, on_gpu=False, with_hpc=False)
@RunIf(tpu=True)
@pl_multi_process_test
def test_tpu_host_world_size(tmpdir):
"""Test Host World size env setup on TPU."""
class DebugModel(BoringModel):
def on_train_start(self):
assert os.environ.get("XRT_HOST_WORLD_SIZE") == str(1)
def teardown(self, stage):
assert "XRT_HOST_WORLD_SIZE" not in os.environ
tutils.reset_seed()
trainer_options = dict(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=4,
tpu_cores=8,
limit_train_batches=0.4,
limit_val_batches=0.4,
)
model = DebugModel()
tpipes.run_model_test(trainer_options, model, on_gpu=False, with_hpc=False)
| [
"torch.tensor",
"torch.utils.data.DataLoader"
] | 1.4 | jbuckman/pytorch-lightning | cc74fb717a7127fecd4dbb9c743ba28b40de7f64 |
1.6 | import torch
import torch.nn as nn
from transformers import BertModel, BertPreTrainedModel
class FCLayer(nn.Module):
def __init__(self, input_dim, output_dim, dropout_rate=0.0, use_activation=True):
super(FCLayer, self).__init__()
self.use_activation = use_activation
self.dropout = nn.Dropout(dropout_rate)
self.linear = nn.Linear(input_dim, output_dim)
self.tanh = nn.Tanh()
def forward(self, x):
x = self.dropout(x)
if self.use_activation:
x = self.tanh(x)
return self.linear(x)
class RBERT(BertPreTrainedModel):
def __init__(self, config, args):
super(RBERT, self).__init__(config)
self.bert = BertModel(config=config) # Load pretrained bert
self.num_labels = config.num_labels
self.cls_fc_layer = FCLayer(config.hidden_size, config.hidden_size, args.dropout_rate)
self.entity_fc_layer = FCLayer(config.hidden_size, config.hidden_size, args.dropout_rate)
self.label_classifier = FCLayer(
config.hidden_size * 3,
config.num_labels,
args.dropout_rate,
use_activation=False,
)
@staticmethod
def entity_average(hidden_output, e_mask):
"""
Average the entity hidden state vectors (H_i ~ H_j)
:param hidden_output: [batch_size, j-i+1, dim]
:param e_mask: [batch_size, max_seq_len]
e.g. e_mask[0] == [0, 0, 0, 1, 1, 1, 0, 0, ... 0]
:return: [batch_size, dim]
"""
e_mask_unsqueeze = e_mask.unsqueeze(1) # [b, 1, j-i+1]
length_tensor = (e_mask != 0).sum(dim=1).unsqueeze(1) # [batch_size, 1]
# [b, 1, j-i+1] * [b, j-i+1, dim] = [b, 1, dim] -> [b, dim]
sum_vector = torch.bmm(e_mask_unsqueeze.float(), hidden_output).squeeze(1)
avg_vector = sum_vector.float() / length_tensor.float() # broadcasting
return avg_vector
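    # Illustrative shape note (not in the original): with hidden_output of shape
    # [batch, seq_len, hidden] and e_mask marking an entity span such as
    # [0, 1, 1, 1, 0, ...], the bmm above sums the hidden vectors of the masked
    # positions, and dividing by the span length yields their mean, i.e. one
    # [batch, hidden] vector per entity.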
def forward(self, input_ids, attention_mask, token_type_ids, labels, e1_mask, e2_mask):
outputs = self.bert(
input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids
) # sequence_output, pooled_output, (hidden_states), (attentions)
sequence_output = outputs[0]
pooled_output = outputs[1] # [CLS]
# Average
e1_h = self.entity_average(sequence_output, e1_mask)
e2_h = self.entity_average(sequence_output, e2_mask)
# Dropout -> tanh -> fc_layer (Share FC layer for e1 and e2)
pooled_output = self.cls_fc_layer(pooled_output)
e1_h = self.entity_fc_layer(e1_h)
e2_h = self.entity_fc_layer(e2_h)
# Concat -> fc_layer
concat_h = torch.cat([pooled_output, e1_h, e2_h], dim=-1)
logits = self.label_classifier(concat_h)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
# Softmax
if labels is not None:
if self.num_labels == 1:
loss_fct = nn.MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = nn.CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), logits, (hidden_states), (attentions)
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.cat",
"torch.nn.MSELoss",
"torch.nn.Tanh",
"torch.nn.CrossEntropyLoss"
] | 1.6.0 | isotrforever/R-BERT | 99e986cab12f2d91f2445c651908c8a18c8c9efe |
1.0 | ###############################################################################
# Language Modeling on Penn Tree Bank
#
# This file generates new sentences sampled from the language model
#
###############################################################################
import argparse
import torch
import data
parser = argparse.ArgumentParser(description='PyTorch Wikitext-2 Language Model')
# Model parameters.
parser.add_argument('--data', type=str, default='./data/wikitext-2',
help='location of the data corpus')
parser.add_argument('--checkpoint', type=str, default='./model.pt',
help='model checkpoint to use')
parser.add_argument('--outf', type=str, default='generated.txt',
help='output file for generated text')
parser.add_argument('--words', type=int, default='1000',
help='number of words to generate')
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
parser.add_argument('--cuda', action='store_true',
help='use CUDA')
parser.add_argument('--temperature', type=float, default=1.0,
help='temperature - higher will increase diversity')
parser.add_argument('--log-interval', type=int, default=100,
help='reporting interval')
args = parser.parse_args()
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
if args.temperature < 1e-3:
parser.error("--temperature has to be greater or equal 1e-3")
with open(args.checkpoint, 'rb') as f:
model = torch.load(f)
model.eval()
if args.cuda:
model.cuda()
else:
model.cpu()
corpus = data.Corpus(args.data)
ntokens = len(corpus.dictionary)
hidden = model.init_hidden(1)
with torch.no_grad():
input = torch.rand(1, 1).mul(ntokens).long()
if args.cuda:
input = input.cuda()
with open(args.outf, 'w') as outf:
for i in range(args.words):
output, hidden = model(input, hidden)
word_weights = output.squeeze().float().data.div(args.temperature).exp().cpu()
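            # Illustrative note (not in the original): dividing the output scores
            # by args.temperature before exp() sharpens (temperature < 1) or
            # flattens (temperature > 1) the categorical distribution that
            # torch.multinomial samples the next word index from.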
word_idx = torch.multinomial(word_weights, 1)[0]
input.data.fill_(word_idx)
word = corpus.dictionary.idx2word[word_idx]
outf.write(word + ('\n' if i % 20 == 19 else ' '))
if i % args.log_interval == 0:
print('| Generated {}/{} words'.format(i, args.words))
| [
"torch.rand",
"torch.no_grad",
"torch.manual_seed",
"torch.multinomial",
"torch.cuda.is_available",
"torch.load"
] | 1.0 | Cubbee/apex | 0a991543846966d5f586540dc2441e512139e9fc |
1.0 | """ Group all tests cases for layers"""
import pytest
import torch
from polaris.network.layers import SqueezeExcitation, ResidualBlock2D
def test_squeeze_excitation():
X = torch.tensor([[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]])
se = SqueezeExcitation(channels=1, ratio=1)
se.dense_linear_1.weight.data = torch.tensor([[4.0]])
se.dense_linear_1.bias.data = torch.tensor([[2.0]])
se.dense_linear_2.weight.data = torch.tensor([[-0.1], [2.0]])
se.dense_linear_2.bias.data = torch.tensor([0.1, -3])
output = se(X)
expected = torch.tensor([[[[41.109, 41.218, 41.327], [41.436, 41.545, 41.655], [41.764, 41.873, 41.982]]]])
assert pytest.approx(expected.detach().numpy(), abs=1e-3) == output.detach().numpy()
def test_residual_block():
X = torch.tensor([[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]])
rb = ResidualBlock2D(channels=1, kernel_size=3, se_ratio=1)
rb.conv_layer_1.weight.data = torch.tensor([[[[0.0, 1, 0.0], [1, 2, 1], [0.0, 1, 0.0]]]])
rb.conv_layer_2.weight.data = torch.tensor([[[[0.0, 1, 0.0], [1, 1, 1], [0.0, 1, 0.0]]]])
rb.batch_norm_1.weight.data = torch.tensor([0.1])
rb.batch_norm_2.weight.data = torch.tensor([1.0])
rb.squeeze_ex.dense_linear_1.weight.data = torch.tensor([[0.0]])
rb.squeeze_ex.dense_linear_1.bias.data = torch.tensor([[0.0]])
rb.squeeze_ex.dense_linear_2.weight.data = torch.tensor([[1.0], [1.0]])
rb.squeeze_ex.dense_linear_2.bias.data = torch.tensor([1.0, 0.0])
output = rb(X)
expected = torch.tensor([[[[0.000, 1.351, 2.282], [3.535, 5.685, 6.340], [7.018, 9.076, 9.823]]]])
assert pytest.approx(expected.detach().numpy(), abs=1e-3) == output.detach().numpy()
| [
"torch.tensor"
] | 1.0 | leelastar/leelastar-training | b6b4a36c48c418fcc0bd3ccb7f9c2e95e29f26c9 |
1.0 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch RoBERTa model. """
import math
import torch
import torch.nn as nn
import torch.utils.checkpoint
from torch.nn import CrossEntropyLoss, MSELoss
from ...activations import ACT2FN, gelu
from ...file_utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from ...utils import logging
from .configuration_roberta import RobertaConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "roberta-base"
_CONFIG_FOR_DOC = "RobertaConfig"
_TOKENIZER_FOR_DOC = "RobertaTokenizer"
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [
"roberta-base",
"roberta-large",
"roberta-large-mnli",
"distilroberta-base",
"roberta-base-openai-detector",
"roberta-large-openai-detector",
# See all RoBERTa models at https://huggingface.co/models?filter=roberta
]
class RobertaEmbeddings(nn.Module):
"""
Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
"""
# Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
# End copy
self.padding_idx = config.pad_token_id
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
)
def forward(
self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
):
if position_ids is None:
if input_ids is not None:
# Create the position ids from the input token ids. Any padded tokens remain padded.
position_ids = create_position_ids_from_input_ids(
input_ids, self.padding_idx, past_key_values_length
).to(input_ids.device)
else:
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embedding_type == "absolute":
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
def create_position_ids_from_inputs_embeds(self, inputs_embeds):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(
self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
)
return position_ids.unsqueeze(0).expand(input_shape)
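# Illustrative note (not in the original excerpt): create_position_ids_from_input_ids
# is assumed to count positions only over non-padding tokens and to offset them by
# padding_idx, e.g. with padding_idx=1 the ids [5, 7, 2, 1, 1] (1 = <pad>) map to
# position ids [2, 3, 4, 1, 1].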
# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Roberta
class RobertaSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
self.is_decoder = config.is_decoder
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
mixed_query_layer = self.query(hidden_states)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_layer = past_key_value[0]
value_layer = past_key_value[1]
attention_mask = encoder_attention_mask
elif is_cross_attention:
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
attention_mask = encoder_attention_mask
elif past_key_value is not None:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
else:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_layer, value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
if self.position_embedding_type == "relative_key":
relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in RobertaModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
if self.is_decoder:
outputs = outputs + (past_key_value,)
return outputs
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput
class RobertaSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Roberta
class RobertaAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = RobertaSelfAttention(config)
self.output = RobertaSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
# Copied from transformers.models.bert.modeling_bert.BertIntermediate
class RobertaIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOutput
class RobertaOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Roberta
class RobertaLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = RobertaAttention(config)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added"
self.crossattention = RobertaAttention(config)
self.intermediate = RobertaIntermediate(config)
self.output = RobertaOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
past_key_value=self_attn_past_key_value,
)
attention_output = self_attention_outputs[0]
# if decoder, the last output is tuple of self-attn cache
if self.is_decoder:
outputs = self_attention_outputs[1:-1]
present_key_value = self_attention_outputs[-1]
else:
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
cross_attn_present_key_value = None
if self.is_decoder and encoder_hidden_states is not None:
assert hasattr(
self, "crossattention"
), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
# cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
cross_attn_past_key_value,
output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
# add cross-attn cache to positions 3,4 of present_key_value tuple
cross_attn_present_key_value = cross_attention_outputs[-1]
present_key_value = present_key_value + cross_attn_present_key_value
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
# if decoder, return the attn key/values as the last output
if self.is_decoder:
outputs = outputs + (present_key_value,)
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Roberta
class RobertaEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([RobertaLayer(config) for _ in range(config.num_hidden_layers)])
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
next_decoder_cache = () if use_cache else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[i] if past_key_values is not None else None
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, past_key_value, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
next_decoder_cache,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
# Copied from transformers.models.bert.modeling_bert.BertPooler
class RobertaPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class RobertaPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = RobertaConfig
base_model_prefix = "roberta"
# Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights
def _init_weights(self, module):
""" Initialize the weights """
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
ROBERTA_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.RobertaConfig`): Model configuration class with all the parameters of the
model. Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
ROBERTA_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.RobertaTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.",
ROBERTA_START_DOCSTRING,
)
class RobertaModel(RobertaPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in `Attention is
all you need`_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz
Kaiser and Illia Polosukhin.
    To behave as a decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
    set to :obj:`True`. To be used in a Seq2Seq model, the model needs to be initialized with both :obj:`is_decoder`
argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
input to the forward pass.
.. _`Attention is all you need`: https://arxiv.org/abs/1706.03762
"""
_keys_to_ignore_on_load_missing = [r"position_ids"]
# Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->Roberta
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = RobertaEmbeddings(config)
self.encoder = RobertaEncoder(config)
self.pooler = RobertaPooler(config) if add_pooling_layer else None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base
        class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPoolingAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
# Copied from transformers.models.bert.modeling_bert.BertModel.forward
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
batch_size, seq_length = input_shape
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if attention_mask is None:
            attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
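# Illustrative usage sketch (not part of the original module): a minimal feature-extraction pass
# through RobertaModel. The 'roberta-base' checkpoint name is an assumed example.
#
#     >>> from transformers import RobertaTokenizer, RobertaModel
#     >>> import torch
#     >>> tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
#     >>> model = RobertaModel.from_pretrained('roberta-base')
#     >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
#     >>> with torch.no_grad():
#     ...     outputs = model(**inputs)
#     >>> outputs.last_hidden_state.shape   # (batch_size, sequence_length, hidden_size)
#     >>> outputs.pooler_output.shape       # (batch_size, hidden_size), tanh projection of the first token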
@add_start_docstrings(
"""RoBERTa Model with a `language modeling` head on top for CLM fine-tuning. """, ROBERTA_START_DOCSTRING
)
class RobertaForCausalLM(RobertaPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"position_ids", r"lm_head.decoder.bias"]
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
if not config.is_decoder:
logger.warning("If you want to use `RobertaLMHeadModel` as a standalone, add `is_decoder=True.`")
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.lm_head = RobertaLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
Returns:
Example::
>>> from transformers import RobertaTokenizer, RobertaForCausalLM, RobertaConfig
>>> import torch
>>> tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
>>> config = RobertaConfig.from_pretrained("roberta-base")
>>> config.is_decoder = True
>>> model = RobertaForCausalLM.from_pretrained('roberta-base', config=config)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
lm_loss = None
if labels is not None:
# we are doing next-token prediction; shift prediction scores and input ids by one
shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
labels = labels[:, 1:].contiguous()
loss_fct = CrossEntropyLoss()
lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((lm_loss,) + output) if lm_loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=lm_loss,
logits=prediction_scores,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_shape)
# cut decoder_input_ids if past is used
if past is not None:
input_ids = input_ids[:, -1:]
return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past}
def _reorder_cache(self, past, beam_idx):
reordered_past = ()
for layer_past in past:
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
return reordered_past
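# Illustrative note (not part of the original module): `RobertaForCausalLM._reorder_cache` realigns the
# cached key/value states with the surviving beams during beam search. For example, with
# `beam_idx = torch.tensor([2, 0, 0, 1])`, every cached tensor of shape
# (batch_size * num_beams, num_heads, seq_len, head_dim) is index_select'ed along dim 0 so that row i
# of the reordered cache is taken from beam `beam_idx[i]` of the previous decoding step.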
@add_start_docstrings("""RoBERTa Model with a `language modeling` head on top. """, ROBERTA_START_DOCSTRING)
class RobertaForMaskedLM(RobertaPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"position_ids", r"lm_head.decoder.bias"]
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
if config.is_decoder:
logger.warning(
"If you want to use `RobertaForMaskedLM` make sure `config.is_decoder=False` for "
"bi-directional self-attention."
)
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.lm_head = RobertaLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
mask="<mask>",
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
Used to hide legacy arguments that have been deprecated.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
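# Illustrative usage sketch (not part of the original module): masked-token prediction with
# RobertaForMaskedLM. The 'roberta-base' checkpoint name is an assumed example.
#
#     >>> from transformers import RobertaTokenizer, RobertaForMaskedLM
#     >>> import torch
#     >>> tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
#     >>> model = RobertaForMaskedLM.from_pretrained('roberta-base')
#     >>> inputs = tokenizer("The capital of France is <mask>.", return_tensors="pt")
#     >>> logits = model(**inputs).logits
#     >>> mask_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]
#     >>> predicted_id = logits[0, mask_index].argmax(dim=-1)
#     >>> tokenizer.decode(predicted_id)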
class RobertaLMHead(nn.Module):
"""Roberta Head for masked language modeling."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, features, **kwargs):
x = self.dense(features)
x = gelu(x)
x = self.layer_norm(x)
# project back to size of vocabulary with bias
x = self.decoder(x)
return x
@add_start_docstrings(
"""
RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.
""",
ROBERTA_START_DOCSTRING,
)
class RobertaForSequenceClassification(RobertaPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.classifier = RobertaClassificationHead(config)
self.init_weights()
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
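# Illustrative usage sketch (not part of the original module): a fine-tuning-style forward pass with
# labels for RobertaForSequenceClassification. The checkpoint name and num_labels are assumed examples;
# the classification head is freshly initialized for a plain 'roberta-base' checkpoint.
#
#     >>> from transformers import RobertaTokenizer, RobertaForSequenceClassification
#     >>> import torch
#     >>> tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
#     >>> model = RobertaForSequenceClassification.from_pretrained('roberta-base', num_labels=2)
#     >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
#     >>> labels = torch.tensor([1])
#     >>> outputs = model(**inputs, labels=labels)
#     >>> outputs.loss     # cross-entropy loss, since num_labels > 1
#     >>> outputs.logits   # shape (batch_size, num_labels)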
@add_start_docstrings(
"""
Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
ROBERTA_START_DOCSTRING,
)
class RobertaForMultipleChoice(RobertaPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.roberta = RobertaModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.init_weights()
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
token_type_ids=None,
attention_mask=None,
labels=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
:obj:`input_ids` above)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
flat_inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.roberta(
flat_input_ids,
position_ids=flat_position_ids,
token_type_ids=flat_token_type_ids,
attention_mask=flat_attention_mask,
head_mask=head_mask,
inputs_embeds=flat_inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
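# Illustrative usage sketch (not part of the original module): RobertaForMultipleChoice expects inputs of
# shape (batch_size, num_choices, sequence_length); the forward pass above flattens them to
# (batch_size * num_choices, sequence_length) before the encoder and reshapes the per-choice logits back
# to (batch_size, num_choices). Checkpoint name and texts are assumed examples.
#
#     >>> from transformers import RobertaTokenizer, RobertaForMultipleChoice
#     >>> tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
#     >>> model = RobertaForMultipleChoice.from_pretrained('roberta-base')
#     >>> prompt = "In Italy, pizza served in formal settings is presented unsliced."
#     >>> choice0 = "It is eaten with a fork and a knife."
#     >>> choice1 = "It is eaten while held in the hand."
#     >>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="pt", padding=True)
#     >>> outputs = model(**{k: v.unsqueeze(0) for k, v in encoding.items()})  # batch of size 1
#     >>> outputs.logits   # shape (1, 2), one score per choice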
@add_start_docstrings(
"""
Roberta Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
ROBERTA_START_DOCSTRING,
)
class RobertaForTokenClassification(RobertaPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1]``.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
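# Illustrative note (not part of the original module): the `active_loss` masking in
# `RobertaForTokenClassification.forward` above restricts the token-classification loss to non-padded
# positions. For example, with attention_mask = [[1, 1, 1, 0, 0]], labels at the three active positions
# keep their values while the two padded positions are replaced by CrossEntropyLoss().ignore_index
# (-100), so they contribute nothing to the loss.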
class RobertaClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
@add_start_docstrings(
"""
Roberta Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
ROBERTA_START_DOCSTRING,
)
class RobertaForQuestionAnswering(RobertaPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=QuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
            sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
            sequence are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
total_loss = None
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, the split may add an extra dimension; squeeze it away
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
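# Illustrative usage sketch (not part of the original module): extracting an answer span from the
# start/end logits of RobertaForQuestionAnswering. The 'roberta-base' checkpoint is an assumed example;
# its QA head is freshly initialized, so substitute a SQuAD-fine-tuned checkpoint for meaningful answers.
#
#     >>> from transformers import RobertaTokenizer, RobertaForQuestionAnswering
#     >>> import torch
#     >>> tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
#     >>> model = RobertaForQuestionAnswering.from_pretrained('roberta-base')
#     >>> question, context = "Who was Jim Henson?", "Jim Henson was a nice puppet."
#     >>> inputs = tokenizer(question, context, return_tensors="pt")
#     >>> outputs = model(**inputs)
#     >>> start = int(outputs.start_logits.argmax())
#     >>> end = int(outputs.end_logits.argmax())
#     >>> tokenizer.decode(inputs["input_ids"][0, start : end + 1])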
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
        input_ids (:obj:`torch.Tensor`): Tensor of input token ids.
        padding_idx (:obj:`int`): Index of the padding token in the vocabulary.
        past_key_values_length (:obj:`int`, `optional`, defaults to 0): Number of previously cached positions to offset by.
Returns: torch.Tensor
"""
# The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
mask = input_ids.ne(padding_idx).int()
incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
return incremental_indices.long() + padding_idx
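# Illustrative worked example (not part of the original module): with padding_idx = 1 and
# input_ids = [[0, 31414, 232, 2, 1, 1]], the mask is [1, 1, 1, 1, 0, 0], the cumulative sum gives
# [1, 2, 3, 4, 4, 4], multiplying by the mask zeroes the padded positions, and adding padding_idx
# yields position ids [2, 3, 4, 5, 1, 1] -- padding tokens keep padding_idx while real tokens count up
# from padding_idx + 1, as in fairseq's make_positions.
#
#     >>> import torch
#     >>> ids = torch.tensor([[0, 31414, 232, 2, 1, 1]])
#     >>> create_position_ids_from_input_ids(ids, padding_idx=1)
#     tensor([[2, 3, 4, 5, 1, 1]])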
| [
"torch.nn.Linear",
"torch.zeros",
"torch.nn.Dropout",
"torch.nn.LayerNorm",
"torch.cat",
"torch.nn.MSELoss",
"torch.arange",
"torch.nn.Softmax",
"torch.einsum",
"torch.nn.Tanh",
"torch.nn.CrossEntropyLoss",
"torch.ones",
"torch.tensor",
"torch.tanh",
"torch.matmul",
"torch.nn.Embedding",
"torch.cumsum"
] | 1.0 | reichang182/Transformer | 6f90c29eaaba898919b7689ab7e2cfce1604cdb8 |
1.0 | # coding=utf-8
# Copyright 2020, The RAG Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RAG model implementation."""
from dataclasses import dataclass
from typing import Callable, List, Optional, Tuple
import torch
from ...configuration_utils import PretrainedConfig
from ...file_utils import add_start_docstrings_to_model_forward, replace_return_docstrings
from ...generation_beam_search import BeamSearchScorer
from ...modeling_outputs import ModelOutput
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "RagConfig"
@dataclass
class RetrievAugLMMarginOutput(ModelOutput):
"""
Base class for retriever augmented marginalized models outputs.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Language modeling loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head. The score is possibly marginalized over all documents for
each vocabulary token.
doc_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.n_docs)`):
Score between each retrieved document embeddings (see :obj:`retrieved_doc_embeds`) and
:obj:`question_encoder_last_hidden_state`.
past_key_values (:obj:`List[torch.FloatTensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
List of :obj:`torch.FloatTensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2,
batch_size, num_heads, sequence_length, embed_size_per_head)`).
Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used
(see :obj:`past_key_values` input) to speed up sequential decoding.
retrieved_doc_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.n_docs, hidden_size)`, `optional`, returned when `output_retrieved=True`):
Embedded documents retrieved by the retriever. Is used with ``question_encoder_last_hidden_state`` to
compute the ``doc_scores``.
retrieved_doc_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, config.n_docs)`, `optional`, returned when `output_retrieved=True`):
The indexes of the embedded documents retrieved by the retriever.
context_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size * config.n_docs, config.max_combined_length)`, `optional`, returned when `output_retrieved=True`):
Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever.
context_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size * config.n_docs, config.max_combined_length)`, `optional`, returned when `output_retrieved=True`):
Attention mask post-processed from the retrieved documents and the question encoder :obj:`input_ids` by the
retriever.
question_encoder_last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden states at the output of the last layer of the question encoder pooled output of the
model.
question_enc_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings and one for the output of each
layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.
question_enc_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the question encoder, after the attention softmax, used to compute the weighted
average in the self-attention heads.
generator_enc_last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the generator encoder of the model.
generator_enc_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings and one for the output of each
layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.
generator_enc_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted
average in the self-attention heads.
generator_dec_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings and one for the output of each
layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.
generator_dec_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted
average in the self-attention heads.
generator_cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Cross-attentions weights of the generator decoder, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
doc_scores: torch.FloatTensor = None
past_key_values: Optional[List[torch.FloatTensor]] = None
retrieved_doc_embeds: Optional[torch.FloatTensor] = None
retrieved_doc_ids: Optional[torch.LongTensor] = None
context_input_ids: Optional[torch.LongTensor] = None
context_attention_mask: Optional[torch.LongTensor] = None
question_encoder_last_hidden_state: Optional[torch.FloatTensor] = None
question_enc_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
question_enc_attentions: Optional[Tuple[torch.FloatTensor]] = None
generator_enc_last_hidden_state: Optional[torch.FloatTensor] = None
generator_enc_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
generator_enc_attentions: Optional[Tuple[torch.FloatTensor]] = None
generator_dec_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
generator_dec_attentions: Optional[Tuple[torch.FloatTensor]] = None
generator_cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class RetrievAugLMOutput(ModelOutput):
"""
Args:
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head. The score is possibly marginalized over all documents for
each vocabulary token.
doc_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.n_docs)`):
Score between each retrieved document embeddings (see :obj:`retrieved_doc_embeds`) and
:obj:`question_encoder_last_hidden_state`.
past_key_values (:obj:`List[torch.FloatTensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
List of :obj:`torch.FloatTensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2,
batch_size, num_heads, sequence_length, embed_size_per_head)`).
Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used
(see :obj:`past_key_values` input) to speed up sequential decoding.
retrieved_doc_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.n_docs, hidden_size)`, `optional`, returned when `output_retrieved=True`):
Embedded documents retrieved by the retriever. Is used with ``question_encoder_last_hidden_state`` to
compute the ``doc_scores``.
retrieved_doc_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, config.n_docs)`, `optional`, returned when `output_retrieved=True`):
The indexes of the embedded documents retrieved by the retriever.
context_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size * config.n_docs, config.max_combined_length)`, `optional`, returned when `output_retrieved=True`):
Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever.
context_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size * config.n_docs, config.max_combined_length)`, `optional`, returned when `output_retrieved=True`):
Attention mask post-processed from the retrieved documents and the question encoder :obj:`input_ids` by the
retriever.
question_encoder_last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden states at the output of the last layer of the question encoder pooled output of the
model.
question_enc_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings and one for the output of each
layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.
question_enc_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the question encoder, after the attention softmax, used to compute the weighted
average in the self-attention heads.
generator_enc_last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the generator encoder of the model.
generator_enc_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings and one for the output of each
layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.
generator_enc_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted
average in the self-attention heads.
generator_dec_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings and one for the output of each
layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.
generator_dec_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted
average in the self-attention heads.
generator_cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Cross-attentions weights of the generator decoder, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
"""
logits: torch.FloatTensor = None
doc_scores: torch.FloatTensor = None
past_key_values: Optional[List[torch.FloatTensor]] = None
retrieved_doc_embeds: Optional[torch.FloatTensor] = None
retrieved_doc_ids: Optional[torch.LongTensor] = None
context_input_ids: Optional[torch.LongTensor] = None
context_attention_mask: Optional[torch.LongTensor] = None
question_encoder_last_hidden_state: Optional[torch.FloatTensor] = None
question_enc_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
question_enc_attentions: Optional[Tuple[torch.FloatTensor]] = None
generator_enc_last_hidden_state: Optional[torch.FloatTensor] = None
generator_enc_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
generator_enc_attentions: Optional[Tuple[torch.FloatTensor]] = None
generator_dec_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
generator_dec_attentions: Optional[Tuple[torch.FloatTensor]] = None
generator_cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
class RagPreTrainedModel(PreTrainedModel):
r"""
RAG models were released with the paper `Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks
<https://arxiv.org/abs/2005.11401>`_ by Patrick Lewis, Ethan Perez, Aleksandra Piktus et al.
    RAG is a retrieval-augmented model that encapsulates three components: a question encoder, a dataset retriever and a
    generator. The encoder and generator are trainable, while the retriever is just an indexed dataset.
"""
config_class = RagConfig
base_model_prefix = "rag"
_keys_to_ignore_on_load_missing = [r"position_ids"]
@classmethod
def from_pretrained_question_encoder_generator(
cls,
question_encoder_pretrained_model_name_or_path: str = None,
generator_pretrained_model_name_or_path: str = None,
retriever: RagRetriever = None,
*model_args,
**kwargs
) -> PreTrainedModel:
r"""
Instantiates an question encoder and a generator from one or two base classes of the library from pretrained
model checkpoints.
The model is set in evaluation mode by default using :obj:`model.eval()` (Dropout modules are deactivated). To
train the model, you need to first set it back in training mode with :obj:`model.train()`.
Params:
question_encoder_pretrained_model_name_or_path (:obj: `str`, `optional`, defaults to `None`):
Information necessary to initiate the question encoder. Can be either:
- A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under
a user or organization name, like ``dbmdz/bert-base-german-cased``.
- A path to a `directory` containing model weights saved using
:func:`~transformers.PreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``.
- A path or url to a `tensorflow index checkpoint file` (e.g, ``./tf_model/model.ckpt.index``). In
this case, ``from_tf`` should be set to :obj:`True` and a configuration object should be provided
as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in
a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
generator_pretrained_model_name_or_path (:obj: `str`, `optional`, defaults to `None`):
Information necessary to initiate the generator. Can be either:
- A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under
a user or organization name, like ``dbmdz/bert-base-german-cased``.
- A path to a `directory` containing model weights saved using
:func:`~transformers.PreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``.
- A path or url to a `tensorflow index checkpoint file` (e.g, ``./tf_model/model.ckpt.index``). In
this case, ``from_tf`` should be set to :obj:`True` and a configuration object should be provided
as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in
a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args (remaining positional arguments, `optional`):
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method.
retriever (:class:`~transformers.RagRetriever`, `optional`):
The retriever to use.
            kwargs (remaining dictionary of keyword arguments, `optional`):
                Can be used to update the configuration object (after it has been loaded) and initiate the model (e.g.,
``output_attentions=True``).
- To update the question_encoder configuration, use the prefix `question_encoder_` for each
configuration parameter.
- To update the generator configuration, use the prefix `generator_` for each configuration parameter.
- To update the parent model configuration, do not use a prefix for each configuration parameter.
Behaves differently depending on whether a :obj:`config` is provided or automatically loaded.
Example::
>>> from transformers import RagModel
>>> # initialize a RAG from two pretrained models.
>>> model = RagModel.from_question_encoder_generator_pretrained('facebook/dpr-question_encoder-single-nq-base', 't5-small')
>>> # saving model after fine-tuning
>>> model.save_pretrained("./rag")
>>> # load fine-tuned model
>>> model = RagModel.from_pretrained("./rag")
"""
kwargs_question_encoder = {
argument[len("question_question_encoder_") :]: value
for argument, value in kwargs.items()
if argument.startswith("question_encoder_")
}
kwargs_generator = {
argument[len("generator_") :]: value
for argument, value in kwargs.items()
if argument.startswith("generator_")
}
# remove question_encoder, generator kwargs from kwargs
for key in kwargs_question_encoder.keys():
del kwargs["question_encoder_" + key]
for key in kwargs_generator.keys():
del kwargs["generator_" + key]
# Load and initialize the question_encoder and generator
# The distinction between question_encoder and generator at the model level is made
# by the value of the flag `is_generator` that we need to set correctly.
question_encoder = kwargs_question_encoder.pop("model", None)
if question_encoder is None:
assert (
question_encoder_pretrained_model_name_or_path is not None
), "If `model` is not defined as an argument, a `question_encoder_pretrained_model_name_or_path` has to be defined"
from ..auto.modeling_auto import AutoModel
if "config" not in kwargs_question_encoder:
from ..auto.configuration_auto import AutoConfig
question_encoder_config = AutoConfig.from_pretrained(question_encoder_pretrained_model_name_or_path)
kwargs_question_encoder["config"] = question_encoder_config
question_encoder = AutoModel.from_pretrained(
question_encoder_pretrained_model_name_or_path, *model_args, **kwargs_question_encoder
)
generator = kwargs_generator.pop("model", None)
if generator is None:
assert (
generator_pretrained_model_name_or_path is not None
), "If `generator_model` is not defined as an argument, a `generator_pretrained_model_name_or_path` has to be defined"
from ..auto.modeling_auto import AutoModelForSeq2SeqLM
if "config" not in kwargs_generator:
from ..auto.configuration_auto import AutoConfig
generator_config = AutoConfig.from_pretrained(generator_pretrained_model_name_or_path)
kwargs_generator["config"] = generator_config
generator = AutoModelForSeq2SeqLM.from_pretrained(
generator_pretrained_model_name_or_path, **kwargs_generator
)
# instantiate config with corresponding kwargs
config = kwargs.get("config", None)
if config is None:
config = RagConfig.from_question_encoder_generator_configs(
question_encoder.config, generator.config, **kwargs
)
return cls(question_encoder=question_encoder, generator=generator, config=config, retriever=retriever)
RAG_START_DOCSTRING = r"""
RAG is a seq2seq model which encapsulates two core components: a question encoder and a generator. During a forward
pass, we encode the input with the question encoder and pass it to the retriever to extract relevant context
    documents. The documents are then prepended to the input. Such contextualized inputs are passed to the generator.
The question encoder can be any `autoencoding` model, preferably :class:`~transformers.DPRQuestionEncoder`, and the
generator can be any `seq2seq` model, preferably :class:`~transformers.BartForConditionalGeneration`.
The model can be initialized with a :class:`~transformers.RagRetriever` for end-to-end generation or used in
combination with the outputs of a retriever in multiple steps---see examples for more details. The model is
compatible any `autoencoding` model as the ``question_encoder`` and any `seq2seq` model with language model head as
the ``generator``. It has been tested with :class:`~transformers.DPRQuestionEncoder` as the ``question_encoder``
and :class:`~transformers.BartForConditionalGeneration` or :class:`~transformers.T5ForConditionalGeneration` as the
``generator``.
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Args:
config (:class:`~transformers.RagConfig`):
Model configuration class with all the parameters of the model. Initializing with a config file does not
load the weights associated with the model, only the configuration. Check out the
:meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
question_encoder (:class:`transformers.PreTrainedModel`):
An encoder model compatible with the faiss index encapsulated by the ``retriever``.
generator (:class:`transformers.PreTrainedModel`):
A seq2seq model used as the generator in the RAG architecture.
retriever (:class:`~transformers.RagRetriever`):
A retriever class encapsulating a faiss index queried to obtain context documents for current inputs.
"""
RAG_FORWARD_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. :class:`~transformers.RagConfig`, used to initialize
            the model, specifies which generator to use; it also specifies a compatible generator tokenizer. Use that
tokenizer class to obtain the indices.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
        encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`):
Tuple consists of (:obj:`generator_enc_last_hidden_state`, `optional`: :obj:`generator_enc_hidden_states`,
`optional`: :obj:`generator_enc_attentions`). :obj:`generator_enc_last_hidden_state` of shape
:obj:`(batch_size, n_docs * sequence_length, hidden_size)` is a sequence of hidden-states at the output of
the last layer of the generator's encoder.
Used by the (:class:`~transformers.RagModel`) model during decoding.
decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Provide for generation tasks. `None` by default, construct as per instructions for the generator model
you're using with your RAG instance.
decoder_attention_mask (:obj:`torch.BoolTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will
also be used by default.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`):
Tuple consists of two elements: :obj:`encoder_outputs` of the RAG model (see :obj:`encoder_outputs`) and
:obj:`past_key_values` of the underlying generator. Can be used to speed up decoding.
:obj:`past_key_values` are used in the (:class:`~transformers.RagTokenForGeneration`) model during
decoding.
doc_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.n_docs)`):
            Score between each retrieved document embedding (see :obj:`retrieved_doc_embeds`) and
            :obj:`question_encoder_last_hidden_state`. If the model is not initialized with a ``retriever``,
:obj:`doc_scores` has to be provided to the forward pass. :obj:`doc_scores` can be computed via
:obj:`question_encoder_last_hidden_state` and :obj:`retrieved_doc_embeds`, see examples for more
information.
context_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size * config.n_docs, config.max_combined_length)`, `optional`, returned when `output_retrieved=True`):
Input IDs post-processed from the retrieved documents and the question encoder :obj:`input_ids` by the
retriever.
            If the model is not initialized with a ``retriever``, :obj:`context_input_ids` has to be provided to the
forward pass. :obj:`context_input_ids` are returned by :meth:`~transformers.RagRetriever.__call__`.
context_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size * config.n_docs, config.max_combined_length)`, `optional`, returned when `output_retrieved=True`):
Attention mask post-processed from the retrieved documents and the question encoder :obj:`input_ids` by the
retriever.
            If the model is not initialized with a ``retriever``, :obj:`context_attention_mask` has to be provided
to the forward pass. :obj:`context_attention_mask` are returned by
:meth:`~transformers.RagRetriever.__call__`.
use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
output_retrieved(:obj:`bool`, `optional`):
Whether or not to return the :obj:`retrieved_doc_embeds`, :obj:`retrieved_doc_ids`,
:obj:`context_input_ids` and :obj:`context_attention_mask`. See returned tensors for more detail.
        n_docs (:obj:`int`, `optional`, defaults to :obj:`config.n_docs`):
Number of documents to retrieve and/or number of documents for which to generate an answer.
"""
@add_start_docstrings_to_model_forward(RAG_START_DOCSTRING)
class RagModel(RagPreTrainedModel):
def __init__(
self,
config: Optional[PretrainedConfig] = None,
question_encoder: Optional[PreTrainedModel] = None,
generator: Optional[PreTrainedModel] = None,
retriever: Optional = None, # or maybe just use a `set_retriever(...)` method
**kwargs,
):
assert config is not None or (
question_encoder is not None and generator is not None
), "Either a configuration or an question_encoder and a generator has to be provided."
if config is None:
config = RagConfig.from_question_encoder_generator_configs(
question_encoder.config, generator.config, **kwargs
)
else:
assert isinstance(config, self.config_class), f"config: {config} has to be of type {self.config_class}"
super().__init__(config)
if question_encoder is None:
from ..auto.modeling_auto import AutoModel
question_encoder = AutoModel.from_config(config.question_encoder)
if generator is None:
from ..auto.modeling_auto import AutoModelForSeq2SeqLM
generator = AutoModelForSeq2SeqLM.from_config(config.generator)
self.retriever = retriever
if self.retriever is not None:
assert isinstance(
retriever, RagRetriever
), f"`self.retriever` is of type {type(self.retriever)}, but should be of type `RagRetriever`"
self.retriever = retriever
self.question_encoder = question_encoder
self.generator = generator
@add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=RetrievAugLMOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_outputs=None,
decoder_input_ids=None,
decoder_attention_mask=None,
past_key_values=None,
doc_scores=None,
context_input_ids=None,
context_attention_mask=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
output_retrieved=None,
n_docs=None,
):
r"""
Returns:
Example::
>>> from transformers import RagTokenizer, RagRetriever, RagModel
>>> import torch
>>> tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
>>> retriever = RagRetriever.from_pretrained("facebook/rag-token-base", index_name="exact", use_dummy_dataset=True)
>>> # initialize with RagRetriever to do everything in one forward call
>>> model = RagModel.from_pretrained("facebook/rag-token-base", retriever=retriever)
>>> inputs = tokenizer("How many people live in Paris?", return_tensors="pt")
>>> outputs = model(input_ids=inputs["input_ids"])
"""
n_docs = n_docs if n_docs is not None else self.config.n_docs
use_cache = use_cache if use_cache is not None else self.config.use_cache
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
output_retrieved = output_retrieved if output_retrieved is not None else self.config.output_retrieved
# whether retriever has to be used
has_to_retrieve = (
self.retriever is not None
and (context_input_ids is None or context_attention_mask is None or doc_scores is None)
and encoder_outputs is None
)
# encoder_outputs are pre-computed during RAG-token generation
if encoder_outputs is None:
if has_to_retrieve:
question_enc_outputs = self.question_encoder(
input_ids, attention_mask=attention_mask, return_dict=True
)
question_encoder_last_hidden_state = question_enc_outputs[0] # hidden states of question encoder
retriever_outputs = self.retriever(
input_ids,
question_encoder_last_hidden_state.cpu().detach().to(torch.float32).numpy(),
prefix=self.generator.config.prefix,
n_docs=n_docs,
return_tensors="pt",
)
context_input_ids, context_attention_mask, retrieved_doc_embeds, retrieved_doc_ids = (
retriever_outputs["context_input_ids"],
retriever_outputs["context_attention_mask"],
retriever_outputs["retrieved_doc_embeds"],
retriever_outputs["doc_ids"],
)
# set to correct device
retrieved_doc_embeds = retrieved_doc_embeds.to(question_encoder_last_hidden_state)
context_input_ids = context_input_ids.to(input_ids)
context_attention_mask = context_attention_mask.to(input_ids)
# compute doc_scores
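                # bmm of (batch_size, 1, hidden_dim) with (batch_size, hidden_dim, n_docs) yields (batch_size, 1, n_docs);
                # squeezing gives one inner-product score per retrieved document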
doc_scores = torch.bmm(
question_encoder_last_hidden_state.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)
).squeeze(1)
else:
assert (
context_input_ids is not None
), "Make sure that `context_input_ids` are passed, if no `retriever` is set. Alternatively, you can set a retriever using the `set_retriever(...)` function."
assert (
context_attention_mask is not None
), "Make sure that `context_attention_mask` are passed, if no `retriever` is set. Alternatively, you can set a retriever using the `set_retriever(...)` function."
assert (
doc_scores is not None
), "Make sure that `doc_scores` are passed, if no `retriever` is set. Alternatively, you can set a retriever using the `set_retriever(...)` function."
assert (
doc_scores is not None
), "Make sure that `doc_scores` are passed when passing `encoder_outputs` to the forward function."
assert (
doc_scores.shape[1] % n_docs
) == 0, f" The first dimension of `context_input_ids` should be a multiple of `n_docs`={n_docs}, but is {context_input_ids.shape[0]}."
# Decoder input without context documents
if decoder_input_ids is not None:
decoder_input_ids = decoder_input_ids.repeat_interleave(n_docs, dim=0)
if decoder_attention_mask is not None:
decoder_attention_mask = decoder_attention_mask.repeat_interleave(n_docs, dim=0)
gen_outputs = self.generator(
input_ids=context_input_ids,
attention_mask=context_attention_mask,
encoder_outputs=encoder_outputs,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
return_dict=True,
)
if not has_to_retrieve:
question_encoder_last_hidden_state = None
question_enc_hidden_states = None
question_enc_attentions = None
retrieved_doc_embeds = None
retrieved_doc_ids = None
else:
question_enc_hidden_states = question_enc_outputs.hidden_states
question_enc_attentions = question_enc_outputs.attentions
if not has_to_retrieve or not output_retrieved:
# don't output retrieved docs
            context_input_ids = None
context_attention_mask = None
retrieved_doc_embeds = None
retrieved_doc_ids = None
return RetrievAugLMOutput(
logits=gen_outputs.logits,
doc_scores=doc_scores,
past_key_values=gen_outputs.past_key_values,
context_input_ids=context_input_ids,
context_attention_mask=context_attention_mask,
retrieved_doc_embeds=retrieved_doc_embeds,
retrieved_doc_ids=retrieved_doc_ids,
question_encoder_last_hidden_state=question_encoder_last_hidden_state,
question_enc_hidden_states=question_enc_hidden_states,
question_enc_attentions=question_enc_attentions,
generator_enc_last_hidden_state=gen_outputs.encoder_last_hidden_state,
generator_enc_hidden_states=gen_outputs.encoder_hidden_states,
generator_enc_attentions=gen_outputs.encoder_attentions,
generator_dec_hidden_states=gen_outputs.decoder_hidden_states,
generator_dec_attentions=gen_outputs.decoder_attentions,
generator_cross_attentions=gen_outputs.cross_attentions,
)
@add_start_docstrings_to_model_forward(
"""
A RAG-sequence model implementation. It performs RAG-sequence specific marginalization in the forward pass.
""",
RAG_START_DOCSTRING,
)
class RagSequenceForGeneration(RagPreTrainedModel):
def __init__(
self,
config: Optional[PretrainedConfig] = None,
question_encoder: Optional[PreTrainedModel] = None,
generator: Optional[PreTrainedModel] = None,
retriever: Optional = None,
**kwargs,
):
assert config is not None or (
question_encoder is not None and generator is not None
), "Either a configuration or an encoder and a generator has to be provided."
if config is None:
config = RagConfig.from_question_encoder_generator_configs(
question_encoder.config, generator.config, **kwargs
)
super().__init__(config)
# instantiate model
self.rag = RagModel(config=config, question_encoder=question_encoder, generator=generator, retriever=retriever)
def set_retriever(self, retriever: RagRetriever):
self.rag.retriever = retriever
@add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=RetrievAugLMMarginOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_outputs=None,
decoder_input_ids=None,
decoder_attention_mask=None,
past_key_values=None,
context_input_ids=None,
context_attention_mask=None,
doc_scores=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
output_retrieved=None,
exclude_bos_score=None,
reduce_loss=None,
labels=None,
n_docs=None,
**kwargs # needs kwargs for generation
):
r"""
exclude_bos_score (:obj:`bool`, `optional`):
Only relevant if ``labels`` is passed. If :obj:`True`, the score of the BOS token is disregarded when
computing the loss.
reduce_loss (:obj:`bool`, `optional`):
Only relevant if ``labels`` is passed. If :obj:`True`, the NLL loss is reduced using the
``torch.Tensor.sum`` operation.
kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
            Legacy dictionary, which is required so that the model can use the `generate()` function.
Returns:
Example::
>>> from transformers import RagTokenizer, RagRetriever, RagSequenceForGeneration
>>> import torch
>>> tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
>>> retriever = RagRetriever.from_pretrained("facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True)
>>> # initialize with RagRetriever to do everything in one forward call
            >>> model = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", retriever=retriever)
>>> inputs = tokenizer("How many people live in Paris?", return_tensors="pt")
>>> with tokenizer.as_target_tokenizer():
... targets = tokenizer("In Paris, there are 10 million people.", return_tensors="pt")
>>> input_ids = inputs["input_ids"]
>>> labels = targets["input_ids"]
>>> outputs = model(input_ids=input_ids, labels=labels)
>>> # or use retriever separately
>>> model = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", use_dummy_dataset=True)
>>> # 1. Encode
>>> question_hidden_states = model.question_encoder(input_ids)[0]
>>> # 2. Retrieve
>>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.detach().numpy(), return_tensors="pt")
>>> doc_scores = torch.bmm(question_hidden_states.unsqueeze(1), docs_dict["retrieved_doc_embeds"].float().transpose(1, 2)).squeeze(1)
>>> # 3. Forward to generator
>>> outputs = model(context_input_ids=docs_dict["context_input_ids"], context_attention_mask=docs_dict["context_attention_mask"], doc_scores=doc_scores, decoder_input_ids=labels)
"""
n_docs = n_docs if n_docs is not None else self.config.n_docs
exclude_bos_score = exclude_bos_score if exclude_bos_score is not None else self.config.exclude_bos_score
reduce_loss = reduce_loss if reduce_loss is not None else self.config.reduce_loss
if labels is not None:
if decoder_input_ids is None:
decoder_input_ids = labels
use_cache = False
outputs = self.rag(
input_ids=input_ids,
attention_mask=attention_mask,
encoder_outputs=encoder_outputs,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
context_input_ids=context_input_ids,
context_attention_mask=context_attention_mask,
doc_scores=doc_scores,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
output_retrieved=output_retrieved,
n_docs=n_docs,
)
loss = None
if labels is not None:
loss = self.get_nll(
outputs.logits,
outputs.doc_scores,
decoder_input_ids,
reduce_loss=reduce_loss,
epsilon=self.config.label_smoothing,
exclude_bos_score=exclude_bos_score,
n_docs=n_docs,
)
return RetrievAugLMMarginOutput(
loss=loss,
logits=outputs.logits,
doc_scores=outputs.doc_scores,
past_key_values=outputs.past_key_values,
context_input_ids=outputs.context_input_ids,
context_attention_mask=outputs.context_attention_mask,
retrieved_doc_embeds=outputs.retrieved_doc_embeds,
retrieved_doc_ids=outputs.retrieved_doc_ids,
question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state,
question_enc_hidden_states=outputs.question_enc_hidden_states,
question_enc_attentions=outputs.question_enc_attentions,
generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state,
generator_enc_hidden_states=outputs.generator_enc_hidden_states,
generator_enc_attentions=outputs.generator_enc_attentions,
generator_dec_hidden_states=outputs.generator_dec_hidden_states,
generator_dec_attentions=outputs.generator_dec_attentions,
generator_cross_attentions=outputs.generator_cross_attentions,
)
@property
def retriever(self):
return self.rag.retriever
@property
def generator(self):
return self.rag.generator
@property
def question_encoder(self):
return self.rag.question_encoder
@torch.no_grad()
def generate(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
context_input_ids=None,
context_attention_mask=None,
doc_scores=None,
do_deduplication=None, # defaults to True
num_return_sequences=None, # defaults to 1
num_beams=None, # defaults to 1
n_docs=None,
**model_kwargs
):
"""
        Implements RAG sequence "thorough" decoding. Read the :meth:`~transformers.PreTrainedModel.generate`
documentation for more information on how to set other generate input parameters.
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
The sequence used as a prompt for the generation. If :obj:`input_ids` is not passed, then
:obj:`context_input_ids` has to be provided.
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
context_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size * config.n_docs, config.max_combined_length)`, `optional`, returned when `output_retrieved=True`):
Input IDs post-processed from the retrieved documents and the question encoder input_ids by the
retriever.
context_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size * config.n_docs, config.max_combined_length)`, `optional`, returned when `output_retrieved=True`):
Attention mask post-processed from the retrieved documents and the question encoder :obj:`input_ids` by
the retriever.
If the model is not initialized with a ``retriever`` or ``input_ids`` is not given,
:obj:`context_input_ids` and :obj:`context_attention_mask` have to be provided to the forward pass.
They are returned by :meth:`~transformers.RagRetriever.__call__`.
doc_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.n_docs)`):
                Score between each retrieved document embedding (see :obj:`retrieved_doc_embeds`) and
:obj:`question_encoder_last_hidden_state`.
If the model is not initialized with a ``retriever`` or ``input_ids`` is not given, :obj:`doc_scores`
has to be provided to the forward pass. :obj:`doc_scores` are returned by
:meth:`~transformers.RagRetriever.__call__`.
do_deduplication (:obj:`bool`, `optional`):
Whether or not to deduplicate the generations from different context documents for a given input. Has
                to be set to :obj:`False` if used while training with a distributed backend.
num_return_sequences(:obj:`int`, `optional`, defaults to 1):
The number of independently computed returned sequences for each element in the batch. Note that this
                is not the value we pass to the ``generator``'s :func:`~transformers.PreTrainedModel.generate`
function, where we set ``num_return_sequences`` to :obj:`num_beams`.
num_beams (:obj:`int`, `optional`, defaults to 1):
Number of beams for beam search. 1 means no beam search.
            n_docs (:obj:`int`, `optional`, defaults to :obj:`config.n_docs`):
Number of documents to retrieve and/or number of documents for which to generate an answer.
kwargs:
Additional kwargs will be passed to :meth:`~transformers.PreTrainedModel.generate`.
Return:
:obj:`torch.LongTensor` of shape :obj:`(batch_size * num_return_sequences, sequence_length)`: The generated
sequences. The second dimension (sequence length) is either equal to :obj:`max_length` or shorter if all
batches finished early due to the :obj:`eos_token_id`.
"""
n_docs = n_docs if n_docs is not None else self.config.n_docs
do_deduplication = do_deduplication if do_deduplication is not None else self.config.do_deduplication
num_doc_return_sequences = (
num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences
)
num_beams = num_beams if num_beams is not None else self.config.num_beams
assert (
input_ids is not None or context_input_ids is not None
), " At least one of input_ids or context_input_ids must be given"
if self.retriever is not None and context_input_ids is None:
question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0]
context_input_ids = self.retriever(
input_ids,
question_hidden_states.cpu().detach().to(torch.float32).numpy(),
prefix=self.generator.config.prefix,
n_docs=n_docs,
return_tensors="pt",
)["context_input_ids"]
# set to correct device
context_input_ids = context_input_ids.to(input_ids)
hypos = []
model_kwargs["num_beams"] = num_beams
model_kwargs["num_return_sequences"] = num_beams
model_kwargs["attention_mask"] = None
batch_size = input_ids.shape[0] if input_ids is not None else context_input_ids.shape[0] // n_docs
for index in range(batch_size):
# first, generate beams from documents:
generator_input_ids = context_input_ids[index * n_docs : (index + 1) * n_docs] # (n_docs, max_len)
output_sequences = self.generator.generate(
generator_input_ids,
**model_kwargs,
) # n_docs * n_beam, tgt_len
if do_deduplication:
# do_deduplication, max_output_len
output_sequences = torch.stack(list({str(k.tolist()): k for k in output_sequences}.values()))
num_candidates = output_sequences.shape[
0
] # after deduplication, this number can be less than n_docs*n_beam
# then, run model forwards to get nll scores:
if input_ids is not None:
new_input_ids = input_ids[index : index + 1].repeat(num_candidates, 1)
outputs = self(new_input_ids, labels=output_sequences, exclude_bos_score=True)
else: # input_ids is None, need context_input_ids/mask and doc_scores
assert (
context_attention_mask is not None
), "Make sure that `context_attention_mask` are passed, if no `input_ids` is set. Alternatively, you can set a retriever using the `set_retriever(...)` function."
assert (
doc_scores is not None
), "Make sure that `doc_scores` are passed, if no `input_ids` is set. Alternatively, you can set a retriever using the `set_retriever(...)` function."
individual_input_ids = generator_input_ids.repeat(
num_candidates, 1
) # (num_candidates*n_docs, max_len)
individual_attention_mask = context_attention_mask[index * n_docs : (index + 1) * n_docs]
individual_attention_mask = individual_attention_mask.repeat(num_candidates, 1)
individual_doc_scores = doc_scores[index : (index + 1), :] # doc_scores.shape = [batch, n_docs]
individual_doc_scores = individual_doc_scores.repeat(num_candidates, 1) # [num_candidates, n_docs]
outputs = self(
context_input_ids=individual_input_ids,
context_attention_mask=individual_attention_mask,
doc_scores=individual_doc_scores,
labels=output_sequences,
exclude_bos_score=True,
)
top_cand_inds = (-outputs["loss"]).topk(num_doc_return_sequences)[1]
# add hypothesis
hypos.append(output_sequences[top_cand_inds])
return self._cat_and_pad(hypos, pad_token_id=self.config.generator.pad_token_id)
def get_nll(
self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, exclude_bos_score=False, n_docs=None
):
# shift tokens left
target = torch.cat(
[target[:, 1:], target.new(target.shape[0], 1).fill_(self.config.generator.pad_token_id)], 1
)
n_docs = n_docs if n_docs is not None else self.config.n_docs
# bos_token_id is None for T5
bos_token_id = self.config.bos_token_id or self.config.generator.bos_token_id
use_bos = bos_token_id is not None and target[:, 0].eq(bos_token_id).all()
def _mask_pads(ll, smooth_obj):
pad_mask = target.eq(self.config.generator.pad_token_id)
if pad_mask.any():
ll.masked_fill_(pad_mask, 0.0)
smooth_obj.masked_fill_(pad_mask, 0.0)
return ll.squeeze(-1), smooth_obj.squeeze(-1)
# seq_logits dim = (batch*n_docs, tgt_len , #vocabs)
seq_logprobs = torch.nn.functional.log_softmax(seq_logits, dim=-1).view(
seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.size(-1)
) # batch_size x n_docs x tgt_len x #vocab_size
doc_logprobs = torch.nn.functional.log_softmax(doc_scores, dim=1).unsqueeze(-1).unsqueeze(-1)
# RAG-sequence marginalization
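        # the document log-probability is added to the second token score only, so it is counted exactly once per
        # sequence and the BOS score (which can be excluded via `exclude_bos_score`) stays document-independent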
first_token_scores = seq_logprobs[:, :, :1, :]
second_token_scores = seq_logprobs[:, :, 1:2, :]
remainder = seq_logprobs[:, :, 2:, :]
rag_logprobs = torch.cat([first_token_scores, second_token_scores + doc_logprobs, remainder], dim=2)
# calculate loss
target = target.unsqueeze(1).unsqueeze(-1).repeat(1, n_docs, 1, 1)
assert target.dim() == rag_logprobs.dim()
ll = rag_logprobs.gather(dim=-1, index=target)
smooth_obj = rag_logprobs.sum(dim=-1, keepdim=True) # total sum of all (normalised) logits
ll, smooth_obj = _mask_pads(ll, smooth_obj)
# sum over tokens, exclude bos while scoring
ll = ll[:, :, 1:].sum(2) if exclude_bos_score and use_bos else ll.sum(2)
smooth_obj = smooth_obj.sum(2)
ll = ll.logsumexp(1) # logsumexp over docs
smooth_obj = smooth_obj.logsumexp(1)
nll_loss = -ll
smooth_loss = -smooth_obj
if reduce_loss:
nll_loss = nll_loss.sum()
smooth_loss = smooth_loss.sum()
eps_i = epsilon / rag_logprobs.size(-1)
loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
return loss
@staticmethod
def _cat_and_pad(tensors, pad_token_id):
output = (
tensors[0].new(sum([t.shape[0] for t in tensors]), max([t.shape[1] for t in tensors])).fill_(pad_token_id)
)
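        # copy each hypothesis tensor into a shared (total_hypotheses, max_len) buffer pre-filled with the pad token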
ind = 0
for t in tensors:
output[ind : ind + t.shape[0], : t.shape[1]] = t
ind += t.shape[0]
return output
@add_start_docstrings_to_model_forward(
"""
A RAG-token model implementation. It performs RAG-token specific marginalization in the forward pass.
""",
RAG_START_DOCSTRING,
)
class RagTokenForGeneration(RagPreTrainedModel):
def __init__(
self,
config: Optional[PretrainedConfig] = None,
question_encoder: Optional[PreTrainedModel] = None,
generator: Optional[PreTrainedModel] = None,
retriever: Optional = None,
**kwargs,
):
assert config is not None or (
question_encoder is not None and generator is not None
), "Either a configuration or an encoder and a generator has to be provided."
if config is None:
config = RagConfig.from_question_encoder_generator_configs(
question_encoder.config, generator.config, **kwargs
)
super().__init__(config)
# instantiate model
self.rag = RagModel(config=config, question_encoder=question_encoder, generator=generator, retriever=retriever)
def set_retriever(self, retriever: RagRetriever):
self.rag.retriever = retriever
def prepare_inputs_for_generation(
self,
decoder_input_ids,
past=None,
attention_mask=None,
use_cache=None,
encoder_outputs=None,
doc_scores=None,
n_docs=None,
**kwargs
):
if past is not None:
# if past is defined use only last decoder_input_ids
decoder_input_ids = decoder_input_ids[:, -1:]
return {
"input_ids": None,
"encoder_outputs": encoder_outputs,
"doc_scores": doc_scores,
"context_attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"past_key_values": past,
"use_cache": use_cache,
"do_marginalize": True,
"n_docs": n_docs,
}
@property
def retriever(self):
return self.rag.retriever
@property
def generator(self):
return self.rag.generator
@property
def question_encoder(self):
return self.rag.question_encoder
@staticmethod
def _reorder_cache(past, beam_idx):
"""Reorders cache for generation. BART-inspired but we need to take care of the extra dimension for docs"""
def _reorder_stacked(hidden_states, new_order):
n_docs = hidden_states.shape[0] // new_order.shape[0]
hidden_states = hidden_states.view(-1, n_docs, *hidden_states.shape[1:])
hidden_states = hidden_states.index_select(0, new_order)
result = hidden_states.view(-1, *hidden_states.shape[2:])
return result
reordered_past = ()
for layer_past in past:
# get the correct batch idx from decoder layer's batch dim for cross and self-attn
reordered_past += (tuple(_reorder_stacked(past_state, beam_idx) for past_state in layer_past),)
return reordered_past
def marginalize(self, seq_logits, doc_scores, n_docs=None):
n_docs = n_docs if n_docs is not None else self.config.n_docs
# RAG-token marginalization
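        # p(y_i | x, y_<i) = sum_z p(z | x) * p(y_i | x, z, y_<i): add the document log-probs to the per-token
        # log-probs and logsumexp over the document dimension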
seq_logprobs = torch.nn.functional.log_softmax(seq_logits, dim=-1).view(
seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.size(-1)
)
doc_logprobs = torch.log_softmax(doc_scores, dim=1)
log_prob_sum = seq_logprobs + doc_logprobs.unsqueeze(-1).unsqueeze(-1)
return torch.logsumexp(log_prob_sum, dim=1)
@add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=RetrievAugLMMarginOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_outputs=None,
decoder_input_ids=None,
decoder_attention_mask=None,
past_key_values=None,
context_input_ids=None,
context_attention_mask=None,
doc_scores=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
output_retrieved=None,
do_marginalize=None,
reduce_loss=None,
labels=None,
n_docs=None,
**kwargs # needs kwargs for generation
):
r"""
do_marginalize (:obj:`bool`, `optional`):
If :obj:`True`, the logits are marginalized over all documents by making use of
``torch.nn.functional.log_softmax``.
reduce_loss (:obj:`bool`, `optional`):
Only relevant if ``labels`` is passed. If :obj:`True`, the NLL loss is reduced using the
``torch.Tensor.sum`` operation.
kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
            Legacy dictionary, which is required so that the model can use the `generate()` function.
Returns:
Example::
>>> from transformers import RagTokenizer, RagRetriever, RagTokenForGeneration
>>> import torch
>>> tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
>>> retriever = RagRetriever.from_pretrained("facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True)
>>> # initialize with RagRetriever to do everything in one forward call
>>> model = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever)
>>> inputs = tokenizer("How many people live in Paris?", return_tensors="pt")
>>> with tokenizer.as_target_tokenizer():
... targets = tokenizer("In Paris, there are 10 million people.", return_tensors="pt")
>>> input_ids = inputs["input_ids"]
>>> labels = targets["input_ids"]
>>> outputs = model(input_ids=input_ids, labels=labels)
>>> # or use retriever separately
>>> model = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", use_dummy_dataset=True)
>>> # 1. Encode
>>> question_hidden_states = model.question_encoder(input_ids)[0]
>>> # 2. Retrieve
>>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.detach().numpy(), return_tensors="pt")
>>> doc_scores = torch.bmm(question_hidden_states.unsqueeze(1), docs_dict["retrieved_doc_embeds"].float().transpose(1, 2)).squeeze(1)
>>> # 3. Forward to generator
>>> outputs = model(context_input_ids=docs_dict["context_input_ids"], context_attention_mask=docs_dict["context_attention_mask"], doc_scores=doc_scores, decoder_input_ids=labels)
>>> # or directly generate
>>> generated = model.generate(context_input_ids=docs_dict["context_input_ids"], context_attention_mask=docs_dict["context_attention_mask"], doc_scores=doc_scores)
>>> generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True)
"""
n_docs = n_docs if n_docs is not None else self.config.n_docs
do_marginalize = do_marginalize if do_marginalize is not None else self.config.do_marginalize
reduce_loss = reduce_loss if reduce_loss is not None else self.config.reduce_loss
if labels is not None:
if decoder_input_ids is None:
decoder_input_ids = labels
use_cache = False
outputs = self.rag(
input_ids=input_ids,
attention_mask=attention_mask,
encoder_outputs=encoder_outputs,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
context_input_ids=context_input_ids,
context_attention_mask=context_attention_mask,
doc_scores=doc_scores,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
output_retrieved=output_retrieved,
n_docs=n_docs,
)
loss = None
logits = outputs.logits
if labels is not None:
assert decoder_input_ids is not None
loss = self.get_nll(
outputs.logits,
outputs.doc_scores,
labels,
reduce_loss=reduce_loss,
epsilon=self.config.label_smoothing,
n_docs=n_docs,
)
if do_marginalize:
logits = self.marginalize(logits, outputs.doc_scores, n_docs)
return RetrievAugLMMarginOutput(
loss=loss,
logits=logits,
doc_scores=outputs.doc_scores,
past_key_values=outputs.past_key_values,
context_input_ids=outputs.context_input_ids,
context_attention_mask=outputs.context_attention_mask,
retrieved_doc_embeds=outputs.retrieved_doc_embeds,
retrieved_doc_ids=outputs.retrieved_doc_ids,
question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state,
question_enc_hidden_states=outputs.question_enc_hidden_states,
question_enc_attentions=outputs.question_enc_attentions,
generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state,
generator_enc_hidden_states=outputs.generator_enc_hidden_states,
generator_enc_attentions=outputs.generator_enc_attentions,
generator_dec_hidden_states=outputs.generator_dec_hidden_states,
generator_dec_attentions=outputs.generator_dec_attentions,
generator_cross_attentions=outputs.generator_cross_attentions,
)
@torch.no_grad()
def generate(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
context_input_ids=None,
context_attention_mask=None,
doc_scores=None,
max_length=None,
min_length=None,
early_stopping=None,
use_cache=None,
num_beams=None,
num_beam_groups=None,
diversity_penalty=None,
bos_token_id=None,
pad_token_id=None,
eos_token_id=None,
length_penalty=None,
no_repeat_ngram_size=None,
encoder_no_repeat_ngram_size=None,
repetition_penalty=None,
bad_words_ids=None,
num_return_sequences=None,
decoder_start_token_id=None,
n_docs=None,
prefix_allowed_tokens_fn: Callable[[int, torch.Tensor], List[int]] = None,
forced_bos_token_id: Optional[int] = None,
forced_eos_token_id: Optional[int] = None,
remove_invalid_values: Optional[bool] = None,
**model_kwargs
):
"""
Implements RAG token decoding.
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
The sequence used as a prompt for the generation. If :obj:`input_ids` is not passed, then
:obj:`context_input_ids` has to be provided.
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
context_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size * config.n_docs, config.max_combined_length)`, `optional`, returned when `output_retrieved=True`):
Input IDs post-processed from the retrieved documents and the question encoder :obj:`input_ids` by the
retriever.
                If the model is not initialized with a ``retriever``, :obj:`context_input_ids` has to be provided
to the forward pass. :obj:`context_input_ids` are returned by
:meth:`~transformers.RagRetriever.__call__`.
context_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size * config.n_docs, config.max_combined_length)`, `optional`, returned when `output_retrieved=True`):
Attention mask post-processed from the retrieved documents and the question encoder :obj:`input_ids` by
the retriever.
                If the model is not initialized with a ``retriever``, :obj:`context_attention_mask` has to be provided
                to the forward pass. :obj:`context_attention_mask` are returned by
:meth:`~transformers.RagRetriever.__call__`.
doc_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.n_docs)`):
                Score between each retrieved document embedding (see :obj:`retrieved_doc_embeds`) and
:obj:`question_encoder_last_hidden_state`.
                If the model is not initialized with a ``retriever``, :obj:`doc_scores` has to be provided
                to the forward pass. :obj:`doc_scores` are returned by
:meth:`~transformers.RagRetriever.__call__`.
max_length (:obj:`int`, `optional`, defaults to 20):
The maximum length of the sequence to be generated.
min_length (:obj:`int`, `optional`, defaults to 10):
The minimum length of the sequence to be generated.
early_stopping (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to stop the beam search when at least ``num_beams`` sentences are finished per batch or
not.
            use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not the model should use the past last key/values attentions (if applicable to the model) to
speed up decoding.
pad_token_id (:obj:`int`, `optional`):
The id of the `padding` token.
bos_token_id (:obj:`int`, `optional`):
The id of the `beginning-of-sequence` token.
eos_token_id (:obj:`int`, `optional`):
The id of the `end-of-sequence` token.
length_penalty (:obj:`float`, `optional`, defaults to 1.0):
Exponential penalty to the length. 1.0 means no penalty.
Set to values < 1.0 in order to encourage the model to generate shorter sequences, to a value > 1.0 in
order to encourage the model to produce longer sequences.
no_repeat_ngram_size (:obj:`int`, `optional`, defaults to 0):
If set to int > 0, all ngrams of that size can only occur once.
encoder_no_repeat_ngram_size (:obj:`int`, `optional`, defaults to 0):
If set to int > 0, all ngrams of that size that occur in the ``encoder_input_ids`` cannot occur in the
``decoder_input_ids``.
bad_words_ids(:obj:`List[int]`, `optional`):
List of token ids that are not allowed to be generated. In order to get the tokens of the words that
should not appear in the generated text, use :obj:`tokenizer.encode(bad_word, add_prefix_space=True)`.
num_beams (:obj:`int`, `optional`, defaults to 1):
Number of beams for beam search. 1 means no beam search.
num_beam_groups (:obj:`int`, `optional`, defaults to 1):
Number of groups to divide :obj:`num_beams` into in order to ensure diversity among different groups of
                beams. See `this paper <https://arxiv.org/pdf/1610.02424.pdf>`__ for more details.
diversity_penalty (:obj:`float`, `optional`, defaults to 0.0):
This value is subtracted from a beam's score if it generates a token same as any beam from other group
at a particular time. Note that :obj:`diversity_penalty` is only effective if ``group beam search`` is
enabled.
num_return_sequences(:obj:`int`, `optional`, defaults to 1):
The number of independently computed returned sequences for each element in the batch. Note that this
                is not the value we pass to the ``generator``'s :func:`~transformers.PreTrainedModel.generate`
function, where we set ``num_return_sequences`` to :obj:`num_beams`.
decoder_start_token_id (:obj:`int`, `optional`):
If an encoder-decoder model starts decoding with a different token than `bos`, the id of that token.
            n_docs (:obj:`int`, `optional`, defaults to :obj:`config.n_docs`):
Number of documents to retrieve and/or number of documents for which to generate an answer.
prefix_allowed_tokens_fn: (:obj:`Callable[[int, torch.Tensor], List[int]]`, `optional`):
                If provided, this function constrains the beam search to allowed tokens only at each step. If not
provided no constraint is applied. This function takes 2 arguments :obj:`inputs_ids` and the batch ID
:obj:`batch_id`. It has to return a list with the allowed tokens for the next generation step
conditioned on the previously generated tokens :obj:`inputs_ids` and the batch ID :obj:`batch_id`. This
argument is useful for constrained generation conditioned on the prefix, as described in
`Autoregressive Entity Retrieval <https://arxiv.org/abs/2010.00904>`__.
forced_bos_token_id (:obj:`int`, `optional`):
The id of the token to force as the first generated token after the :obj:`decoder_start_token_id`.
Useful for multilingual models like :doc:`mBART <../model_doc/mbart>` where the first generated token
needs to be the target language token.
forced_eos_token_id (:obj:`int`, `optional`):
The id of the token to force as the last generated token when :obj:`max_length` is reached.
remove_invalid_values (:obj:`bool`, `optional`):
                Whether to remove possible `nan` and `inf` outputs of the model to prevent the generation method from
                crashing. Note that using ``remove_invalid_values`` can slow down generation.
Return:
:obj:`torch.LongTensor` of shape :obj:`(batch_size * num_return_sequences, sequence_length)`: The generated
sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or shorter if all
batches finished early due to the :obj:`eos_token_id`.
"""
# set default parameters
n_docs = n_docs if n_docs is not None else self.config.n_docs
num_beams = num_beams if num_beams is not None else self.config.num_beams
num_beam_groups = num_beam_groups if num_beam_groups is not None else self.config.num_beam_groups
max_length = max_length if max_length is not None else self.config.max_length
num_return_sequences = (
num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences
)
bos_token_id = bos_token_id if bos_token_id is not None else self.config.generator.bos_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.config.generator.eos_token_id
pad_token_id = pad_token_id if pad_token_id is not None else self.config.generator.pad_token_id
use_cache = use_cache if use_cache is not None else self.config.use_cache
decoder_start_token_id = (
decoder_start_token_id
if decoder_start_token_id is not None
else self.config.generator.decoder_start_token_id
)
remove_invalid_values = (
remove_invalid_values if remove_invalid_values is not None else self.config.remove_invalid_values
)
# retrieve docs
if self.retriever is not None and context_input_ids is None:
question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0]
out = self.retriever(
input_ids,
question_hidden_states.cpu().detach().to(torch.float32).numpy(),
prefix=self.generator.config.prefix,
n_docs=n_docs,
return_tensors="pt",
)
context_input_ids, context_attention_mask, retrieved_doc_embeds = (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
# set to correct device
retrieved_doc_embeds = retrieved_doc_embeds.to(question_hidden_states)
context_input_ids = context_input_ids.to(input_ids)
context_attention_mask = context_attention_mask.to(input_ids)
# compute doc_scores
doc_scores = torch.bmm(question_hidden_states.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)).squeeze(
1
)
assert (
context_input_ids.shape[0] % n_docs
) == 0, f" The first dimension of `context_input_ids` should be a multiple of `n_docs`={n_docs}, but is {context_input_ids.shape[0]}."
# batch_size
batch_size = context_input_ids.shape[0] // n_docs
encoder = self.rag.generator.get_encoder()
encoder_outputs = encoder(input_ids=context_input_ids, attention_mask=context_attention_mask, return_dict=True)
input_ids = torch.full(
(batch_size * num_beams, 1),
decoder_start_token_id,
dtype=torch.long,
device=next(self.parameters()).device,
)
last_hidden_state = encoder_outputs["last_hidden_state"]
def extend_enc_output(tensor, num_beams=None):
# split into `batch_size`, `num_beams`, `num_docs`
tensor = tensor[None, None, :].reshape((batch_size, 1, n_docs) + tensor.shape[1:])
# repeat same last hidden states over `num_beams` dimension
tensor = tensor.expand((batch_size, num_beams, n_docs) + tensor.shape[3:])
# merge `batch_size`, `num_beams`, `num_docs` dims again
return tensor.reshape((batch_size * num_beams * n_docs,) + tensor.shape[3:])
# correctly extend last_hidden_state and attention mask
context_attention_mask = extend_enc_output(context_attention_mask, num_beams=num_beams)
encoder_outputs["last_hidden_state"] = extend_enc_output(last_hidden_state, num_beams=num_beams)
doc_scores = doc_scores.repeat_interleave(num_beams, dim=0)
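        # doc_scores is now (batch_size * num_beams, n_docs), matching the beam-expanded encoder outputs above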
# define start_len & additional parameters
model_kwargs["doc_scores"] = doc_scores
model_kwargs["encoder_outputs"] = encoder_outputs
model_kwargs["attention_mask"] = context_attention_mask
model_kwargs["n_docs"] = n_docs
pre_processor = self._get_logits_processor(
repetition_penalty=repetition_penalty,
no_repeat_ngram_size=no_repeat_ngram_size,
encoder_no_repeat_ngram_size=encoder_no_repeat_ngram_size,
encoder_input_ids=context_input_ids,
bad_words_ids=bad_words_ids,
min_length=min_length,
max_length=max_length,
eos_token_id=eos_token_id,
forced_bos_token_id=forced_bos_token_id,
forced_eos_token_id=forced_eos_token_id,
prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
num_beams=num_beams,
num_beam_groups=num_beam_groups,
diversity_penalty=diversity_penalty,
remove_invalid_values=remove_invalid_values,
)
if num_beams == 1:
if num_return_sequences > 1:
raise ValueError(
f"num_return_sequences has to be 1, but is {num_return_sequences} when doing greedy search."
)
return self.greedy_search(
input_ids,
logits_processor=pre_processor,
max_length=max_length,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
**model_kwargs,
)
elif num_beams > 1:
length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty
early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping
if num_return_sequences > num_beams:
raise ValueError("`num_return_sequences` has to be smaller or equal to `num_beams`.")
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
max_length=max_length,
num_beams=num_beams,
device=self.device,
length_penalty=length_penalty,
do_early_stopping=early_stopping,
num_beam_hyps_to_keep=num_return_sequences,
)
return self.beam_search(
input_ids,
beam_scorer,
logits_processor=pre_processor,
max_length=max_length,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
**model_kwargs,
)
else:
raise ValueError(f"`num_beams` has to be an integer strictly superior to 0 (≥ 1), but is {num_beams}")
def get_input_embeddings(self):
return self.rag.generator.get_input_embeddings()
def get_output_embeddings(self):
return self.rag.generator.get_output_embeddings()
def set_output_embeddings(self, new_embeddings):
return self.rag.generator.set_output_embeddings(new_embeddings)
def shift_tokens_right(self, input_ids, start_token_id=None):
"""Shift input ids one token to the right, and pad with start_token_id"""
if start_token_id is None:
start_token_id = self.config.decoder_start_token_id
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
shifted_input_ids[:, 0] = start_token_id
return shifted_input_ids
def get_nll(self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, n_docs=None):
n_docs = n_docs if n_docs is not None else self.config.n_docs
# shift tokens left
target = torch.cat(
[target[:, 1:], target.new(target.shape[0], 1).fill_(self.config.generator.pad_token_id)], 1
)
def _mask_pads(ll, smooth_obj):
pad_mask = target.eq(self.config.generator.pad_token_id)
if pad_mask.any():
ll.masked_fill_(pad_mask, 0.0)
smooth_obj.masked_fill_(pad_mask, 0.0)
return ll.squeeze(-1), smooth_obj.squeeze(-1)
rag_logprobs = self.marginalize(seq_logits, doc_scores, n_docs)
target = target.unsqueeze(-1)
assert target.dim() == rag_logprobs.dim()
ll = rag_logprobs.gather(dim=-1, index=target)
smooth_obj = rag_logprobs.sum(dim=-1, keepdim=True) # total sum of all (normalised) logits
ll, smooth_obj = _mask_pads(ll, smooth_obj)
ll = ll.sum(1) # sum over tokens
smooth_obj = smooth_obj.sum(1)
nll_loss = -ll
smooth_loss = -smooth_obj
if reduce_loss:
nll_loss = nll_loss.sum()
smooth_loss = smooth_loss.sum()
eps_i = epsilon / rag_logprobs.size(-1)
loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
return loss
| [
"torch.cat",
"torch.log_softmax",
"torch.no_grad",
"torch.nn.functional.log_softmax",
"torch.logsumexp"
] | 1.0 | reichang182/Transformer | 6f90c29eaaba898919b7689ab7e2cfce1604cdb8 |
1.2 | import os
import time
import logging
import argparse
import cv2
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torch.nn.parallel
import torch.utils.data
import torch.nn as nn
from util import dataset, transform, config
from util.util import AverageMeter, intersectionAndUnion, check_makedirs, colorize
cv2.ocl.setUseOpenCL(False)
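# mapping from raw Cityscapes label IDs to the 19 train IDs used for training and evaluation (e.g. 7 = road -> 0)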
label_mapping = {
7: 0, 8: 1, 11: 2, 12: 3,
13: 4, 17: 5,
19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11,
25: 12, 26: 13, 27: 14, 28: 15,
31: 16, 32: 17, 33: 18}
def get_parser():
parser = argparse.ArgumentParser(description='PyTorch Semantic Segmentation')
parser.add_argument('--config', type=str, default='config/cityscapes/cityscapes_deeplab50mem.yaml', help='config file')
parser.add_argument('opts', help='see config/ade20k/ade20k_pspnet50.yaml for all options', default=None, nargs=argparse.REMAINDER)
args = parser.parse_args()
assert args.config is not None
cfg = config.load_cfg_from_cfg_file(args.config)
if args.opts is not None:
cfg = config.merge_cfg_from_list(cfg, args.opts)
return cfg
def get_logger():
logger_name = "main-logger"
logger = logging.getLogger(logger_name)
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
fmt = "[%(asctime)s %(levelname)s %(filename)s line %(lineno)d %(process)d] %(message)s"
handler.setFormatter(logging.Formatter(fmt))
logger.addHandler(handler)
return logger
def check(args):
assert args.classes > 1
assert args.zoom_factor in [1, 2, 4, 8]
assert args.split in ['train', 'val', 'test']
if args.arch == 'psp':
assert (args.train_h - 1) % 8 == 0 and (args.train_w - 1) % 8 == 0
elif args.arch == "deeplabv3":
assert args.train_h % 1 == 0 and args.train_w % 1 == 0
elif args.arch == 'psa':
if args.compact:
args.mask_h = (args.train_h - 1) // (8 * args.shrink_factor) + 1
args.mask_w = (args.train_w - 1) // (8 * args.shrink_factor) + 1
else:
assert (args.mask_h is None and args.mask_w is None) or (args.mask_h is not None and args.mask_w is not None)
if args.mask_h is None and args.mask_w is None:
args.mask_h = 2 * ((args.train_h - 1) // (8 * args.shrink_factor) + 1) - 1
args.mask_w = 2 * ((args.train_w - 1) // (8 * args.shrink_factor) + 1) - 1
else:
assert (args.mask_h % 2 == 1) and (args.mask_h >= 3) and (
args.mask_h <= 2 * ((args.train_h - 1) // (8 * args.shrink_factor) + 1) - 1)
assert (args.mask_w % 2 == 1) and (args.mask_w >= 3) and (
args.mask_w <= 2 * ((args.train_h - 1) // (8 * args.shrink_factor) + 1) - 1)
else:
        raise Exception('architecture {} not supported yet'.format(args.arch))
def main():
global args, logger
args = get_parser()
# check(args)
logger = get_logger()
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(str(x) for x in args.test_gpu)
logger.info(args)
logger.info("=> creating model ...")
logger.info("Classes: {}".format(args.classes))
value_scale = 255
mean = [0.485, 0.456, 0.406]
# mean = [0.39068785, 0.40521392, 0.41434407]
mean = [item * value_scale for item in mean]
std = [0.229, 0.224, 0.225]
# std = [0.29652068, 0.30514979, 0.30080369]
std = [item * value_scale for item in std]
gray_folder = os.path.join(args.save_folder, 'gray')
color_folder = os.path.join(args.save_folder, 'color')
test_transform = transform.Compose([transform.ToTensor()])
test_data = dataset.SemData(split=args.split, data_root=args.data_root, data_list=args.test_list, transform=test_transform)
index_start = args.index_start
if args.index_step == 0:
index_end = len(test_data.data_list)
else:
index_end = min(index_start + args.index_step, len(test_data.data_list))
test_data.data_list = test_data.data_list[index_start:index_end]
test_loader = torch.utils.data.DataLoader(test_data, batch_size=1, shuffle=False, num_workers=args.workers, pin_memory=True)
colors = np.loadtxt(args.colors_path).astype('uint8')
names = [line.rstrip('\n') for line in open(args.names_path)]
criterion = nn.CrossEntropyLoss(ignore_index=args.ignore_label)
if not args.has_prediction:
if args.arch == 'psp':
from model.pspnet import PSPNet
model = PSPNet(backbone=args.backbone, output_stride=args.output_stride, layers=args.layers, classes=args.classes, zoom_factor=args.zoom_factor, pretrained=False)
elif args.arch == 'psa':
from model.psanet import PSANet
model = PSANet(layers=args.layers, classes=args.classes, zoom_factor=args.zoom_factor, compact=args.compact,
shrink_factor=args.shrink_factor, mask_h=args.mask_h, mask_w=args.mask_w,
normalization_factor=args.normalization_factor, psa_softmax=args.psa_softmax, pretrained=False)
elif args.arch == 'deeplabv3':
from model.deeplabv3 import Deeplabv3
model = Deeplabv3(layers=args.layers, classes=args.classes, zoom_factor=args.zoom_factor,
backbone=args.backbone, output_stride=args.output_stride, pretrained=False,
criterion=criterion)
elif args.arch == 'danet':
from model.danet import DANet
model = DANet(layers=args.layers, classes=args.classes, zoom_factor=args.zoom_factor,
backbone=args.backbone, output_stride=args.output_stride, pretrained=False,
criterion=criterion)
elif args.arch == 'trseg':
from model.transformnet import TransformNet
model = TransformNet(layers=args.layers, classes=args.classes, zoom_factor=args.zoom_factor,
backbone=args.backbone, output_stride=args.output_stride, pretrained=False,
criterion=criterion)
elif args.arch == 'hrnet':
from model.hrnet import HighResolutionNet
model = HighResolutionNet(args)
logger.info(model)
model = torch.nn.DataParallel(model).cuda()
# model = model.cuda()
cudnn.benchmark = True
# if os.path.isfile(args.model_path):
# logger.info("=> loading checkpoint '{}'".format(args.model_path))
# checkpoint = torch.load(args.model_path)
# model.load_state_dict(checkpoint['state_dict'], strict=True)
# logger.info("=> loaded checkpoint '{}'".format(args.model_path))
# else:
# raise RuntimeError("=> no checkpoint found at '{}'".format(args.model_path))
eval(test_loader, test_data.data_list, model, args.classes, mean, std, args.base_size, args.test_h, args.test_w, args.scales, gray_folder, color_folder, colors)
if args.split != 'test':
cal_acc(test_data.data_list, gray_folder, args.classes, names)
def net_process(model, image, mean, std=None, flip=True):
input = torch.from_numpy(image.transpose((2, 0, 1))).float()
if std is None:
for t, m in zip(input, mean):
t.sub_(m)
else:
for t, m, s in zip(input, mean, std):
t.sub_(m).div_(s)
input = input.unsqueeze(0).cuda()
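    # optional test-time augmentation: batch the image with its horizontal flip and average the two softmax maps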
if flip:
input = torch.cat([input, input.flip(3)], 0)
with torch.no_grad():
output = model(input)
_, _, h_i, w_i = input.shape
_, _, h_o, w_o = output.shape
if (h_o != h_i) or (w_o != w_i):
output = F.interpolate(output, (h_i, w_i), mode='bilinear', align_corners=True)
output = F.softmax(output, dim=1)
if flip:
output = (output[0] + output[1].flip(2)) / 2
else:
output = output[0]
output = output.data.cpu().numpy()
output = output.transpose(1, 2, 0)
return output
def scale_process(model, image, classes, crop_h, crop_w, h, w, mean, std=None, stride_rate=2/3):
ori_h, ori_w, _ = image.shape
pad_h = max(crop_h - ori_h, 0)
pad_w = max(crop_w - ori_w, 0)
pad_h_half = int(pad_h / 2)
pad_w_half = int(pad_w / 2)
if pad_h > 0 or pad_w > 0:
image = cv2.copyMakeBorder(image, pad_h_half, pad_h - pad_h_half, pad_w_half, pad_w - pad_w_half, cv2.BORDER_CONSTANT, value=mean)
new_h, new_w, _ = image.shape
stride_h = int(np.ceil(crop_h*stride_rate))
stride_w = int(np.ceil(crop_w*stride_rate))
grid_h = int(np.ceil(float(new_h-crop_h)/stride_h) + 1)
grid_w = int(np.ceil(float(new_w-crop_w)/stride_w) + 1)
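    # sliding-window inference: run the network on overlapping crop_h x crop_w windows and average the
    # per-pixel class probabilities by how many windows covered each pixel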
prediction_crop = np.zeros((new_h, new_w, classes), dtype=float)
count_crop = np.zeros((new_h, new_w), dtype=float)
for index_h in range(0, grid_h):
for index_w in range(0, grid_w):
s_h = index_h * stride_h
e_h = min(s_h + crop_h, new_h)
s_h = e_h - crop_h
s_w = index_w * stride_w
e_w = min(s_w + crop_w, new_w)
s_w = e_w - crop_w
image_crop = image[s_h:e_h, s_w:e_w].copy()
count_crop[s_h:e_h, s_w:e_w] += 1
prediction_crop[s_h:e_h, s_w:e_w, :] += net_process(model, image_crop, mean, std)
prediction_crop /= np.expand_dims(count_crop, 2)
prediction_crop = prediction_crop[pad_h_half:pad_h_half+ori_h, pad_w_half:pad_w_half+ori_w]
prediction = cv2.resize(prediction_crop, (w, h), interpolation=cv2.INTER_LINEAR)
return prediction
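# The two helpers below convert between train ids and label ids using the
# module-level `label_mapping` dict (label id -> train id), which is expected to
# be defined elsewhere in this script.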
def trainID2labelID(img):
temp = np.copy(img)
for k, v in label_mapping.items():
temp[img == v] = k
return temp
def labelID2trainID(img):
temp = np.copy(img)
for k, v in label_mapping.items():
temp[img == k] = v
return temp
def eval(test_loader, data_list, model, classes, mean, std, base_size, crop_h, crop_w, scales, gray_folder, color_folder, colors):
logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
data_time = AverageMeter()
batch_time = AverageMeter()
model.eval()
end = time.time()
for i, (input, _) in enumerate(test_loader):
data_time.update(time.time() - end)
input = np.squeeze(input.numpy(), axis=0)
image = np.transpose(input, (1, 2, 0))
h, w, _ = image.shape
prediction = np.zeros((h, w, classes), dtype=float)
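        # Multi-scale testing: resize so the long side equals scale * base_size
        # (aspect ratio preserved), run sliding-window inference at each scale,
        # and average the per-scale probability maps before taking the argmax.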
for scale in scales:
long_size = round(scale * base_size)
new_h = long_size
new_w = long_size
if h > w:
new_w = round(long_size/float(h)*w)
else:
new_h = round(long_size/float(w)*h)
image_scale = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
prediction += scale_process(model, image_scale, classes, crop_h, crop_w, h, w, mean, std)
prediction /= len(scales)
prediction = np.argmax(prediction, axis=2)
batch_time.update(time.time() - end)
end = time.time()
if ((i + 1) % 10 == 0) or (i + 1 == len(test_loader)):
logger.info('Test: [{}/{}] '
'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}).'.format(i + 1, len(test_loader),
data_time=data_time,
batch_time=batch_time))
check_makedirs(gray_folder)
check_makedirs(color_folder)
gray = np.uint8(prediction)
color = colorize(gray, colors)
image_path, _ = data_list[i]
image_name = image_path.split('/')[-1].split('.')[0]
gray_path = os.path.join(gray_folder, image_name + '.png')
color_path = os.path.join(color_folder, image_name + '.png')
if args.split == 'test':
gray_labelid = trainID2labelID(gray)
# save_gray_path = gray_path.replace('_leftImg8bit','*')
cv2.imwrite(gray_path, gray_labelid)
# if make_video:
else:
cv2.imwrite(gray_path, gray)
color.save(color_path)
logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
def cal_acc(data_list, pred_folder, classes, names):
intersection_meter = AverageMeter()
union_meter = AverageMeter()
target_meter = AverageMeter()
for i, (image_path, target_path) in enumerate(data_list):
image_name = image_path.split('/')[-1].split('.')[0]
pred = cv2.imread(os.path.join(pred_folder, image_name+'.png'), cv2.IMREAD_GRAYSCALE)
target = cv2.imread(target_path, cv2.IMREAD_GRAYSCALE)
if "RELLIS" in target_path:
target = dataset.rellis_labelID2trainID(target)
intersection, union, target = intersectionAndUnion(pred, target, classes)
intersection_meter.update(intersection)
union_meter.update(union)
target_meter.update(target)
accuracy = sum(intersection_meter.val) / (sum(target_meter.val) + 1e-10)
logger.info('Evaluating {0}/{1} on image {2}, accuracy {3:.4f}.'.format(i + 1, len(data_list), image_name+'.png', accuracy))
iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
mIoU = np.mean(iou_class)
mAcc = np.mean(accuracy_class)
allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)
logger.info('Eval result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(mIoU, mAcc, allAcc))
for i in range(classes):
logger.info('Class_{} result: iou/accuracy {:.4f}/{:.4f}, name: {}.'.format(i, iou_class[i], accuracy_class[i], names[i]))
if __name__ == '__main__':
main()
| [
"torch.nn.DataParallel",
"torch.no_grad",
"torch.nn.functional.interpolate",
"torch.utils.data.DataLoader",
"torch.nn.functional.softmax",
"torch.nn.CrossEntropyLoss"
] | 1.2.0 | youngsjjn/MemSeg | a3daf8039dc2c763d366f4bfd07c87416cf8ec81 |
1.2 | from __future__ import absolute_import
from __future__ import division
import torch
from torch import nn
from torch.nn import functional as F
import torchvision
from ..utils.torchtools import weights_init_kaiming
__all__ = ['ResNet50', 'ResNet101', 'ResNet50M', 'ResNet50B']
class ResNet50(nn.Module):
def __init__(self, num_classes, loss={'xent'}, **kwargs):
super(ResNet50, self).__init__()
self.loss = loss
resnet50 = torchvision.models.resnet50(pretrained=True)
self.base = nn.Sequential(*list(resnet50.children())[:-2])
self.classifier = nn.Linear(2048, num_classes)
self.feat_dim = 2048
def forward(self, x):
x = self.base(x)
x = F.avg_pool2d(x, x.size()[2:])
f = x.view(x.size(0), -1)
if not self.training:
return f
y = self.classifier(f)
if self.loss == {'xent'}:
return y
elif self.loss == {'xent', 'htri'}:
return y, f
else:
raise KeyError("Unsupported loss: {}".format(self.loss))
class ResNet101(nn.Module):
def __init__(self, num_classes, loss={'xent'}, **kwargs):
super(ResNet101, self).__init__()
self.loss = loss
resnet101 = torchvision.models.resnet101(pretrained=True)
self.base = nn.Sequential(*list(resnet101.children())[:-2])
self.classifier = nn.Linear(2048, num_classes)
self.feat_dim = 2048 # feature dimension
def forward(self, x):
x = self.base(x)
x = F.avg_pool2d(x, x.size()[2:])
f = x.view(x.size(0), -1)
if not self.training:
return f
y = self.classifier(f)
if self.loss == {'xent'}:
return y
elif self.loss == {'xent', 'htri'}:
return y, f
else:
raise KeyError("Unsupported loss: {}".format(self.loss))
class ResNet50M(nn.Module):
"""ResNet50 + mid-level features.
Reference:
Yu et al. The Devil is in the Middle: Exploiting Mid-level Representations for
Cross-Domain Instance Matching. arXiv:1711.08106.
"""
def __init__(self, num_classes=0, loss={'xent'}, **kwargs):
super(ResNet50M, self).__init__()
self.loss = loss
resnet50 = torchvision.models.resnet50(pretrained=True)
base = nn.Sequential(*list(resnet50.children())[:-2])
self.layers1 = nn.Sequential(base[0], base[1], base[2])
self.layers2 = nn.Sequential(base[3], base[4])
self.layers3 = base[5]
self.layers4 = base[6]
self.layers5a = base[7][0]
self.layers5b = base[7][1]
self.layers5c = base[7][2]
self.fc_fuse = nn.Sequential(nn.Linear(4096, 1024), nn.BatchNorm1d(1024), nn.ReLU())
self.classifier = nn.Linear(3072, num_classes)
self.feat_dim = 3072 # feature dimension
def forward(self, x):
x1 = self.layers1(x)
x2 = self.layers2(x1)
x3 = self.layers3(x2)
x4 = self.layers4(x3)
x5a = self.layers5a(x4)
x5b = self.layers5b(x5a)
x5c = self.layers5c(x5b)
x5a_feat = F.avg_pool2d(x5a, x5a.size()[2:]).view(x5a.size(0), x5a.size(1))
x5b_feat = F.avg_pool2d(x5b, x5b.size()[2:]).view(x5b.size(0), x5b.size(1))
x5c_feat = F.avg_pool2d(x5c, x5c.size()[2:]).view(x5c.size(0), x5c.size(1))
midfeat = torch.cat((x5a_feat, x5b_feat), dim=1)
midfeat = self.fc_fuse(midfeat)
combofeat = torch.cat((x5c_feat, midfeat), dim=1)
if not self.training:
return combofeat
prelogits = self.classifier(combofeat)
if self.loss == {'xent'}:
return prelogits
elif self.loss == {'xent', 'htri'}:
return prelogits, combofeat
else:
raise KeyError("Unsupported loss: {}".format(self.loss))
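def _example_reid_forward():
    # Minimal usage sketch, assuming an ImageNet-pretrained backbone can be
    # downloaded; it shows how the `loss` set controls what forward() returns.
    # The batch shape 8 x 3 x 256 x 128 is an illustrative person
    # re-identification input size, not a requirement of the models above.
    model = ResNet50M(num_classes=751, loss={'xent', 'htri'})
    images = torch.randn(8, 3, 256, 128)
    model.train()
    logits, features = model(images)       # (8, 751) logits and (8, 3072) features
    model.eval()
    with torch.no_grad():
        features = model(images)           # (8, 3072) combined feature vector
    return logits, features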
class ResNet50B(nn.Module):
"""Resnet50+bottleneck
Reference:
https://github.com/L1aoXingyu/reid_baseline
"""
def __init__(self, num_classes=0, loss={'xent'}, **kwargs):
super(ResNet50B, self).__init__()
self.loss = loss
resnet50 = torchvision.models.resnet50(pretrained=True)
resnet50.layer4[0].conv2.stride = (1, 1)
resnet50.layer4[0].downsample[0].stride = (1, 1)
self.base = nn.Sequential(*list(resnet50.children())[:-2])
self.in_planes = 2048
self.bottleneck = nn.Sequential(
nn.Linear(self.in_planes, 512),
nn.BatchNorm1d(512),
nn.LeakyReLU(0.1),
nn.Dropout(p=0.5))
self.bottleneck.apply(weights_init_kaiming)
self.classifier = nn.Linear(512, num_classes)
self.classifier.apply(weights_init_kaiming)
def forward(self, x):
global_feat = self.base(x)
global_feat = F.avg_pool2d(global_feat, global_feat.size()[-2:])
global_feat = global_feat.view(global_feat.size(0), -1)
if not self.training:
return global_feat
else:
feat = self.bottleneck(global_feat)
y = self.classifier(feat)
if self.loss == {'xent'}:
return y
elif self.loss == {'xent', 'htri'}:
return y, global_feat
else:
raise KeyError("Unsupported loss: {}".format(self.loss)) | [
"torch.nn.Linear",
"torch.cat",
"torch.nn.Dropout",
"torch.nn.Sequential",
"torch.nn.LeakyReLU",
"torch.nn.ReLU",
"torch.nn.BatchNorm1d"
] | 1.2.0 | Shengyuan-Z/AGRL.pytorch | 6107fe0e4df5c8048a65f811bab46d2fb4624783 |
1.2 | from __future__ import absolute_import
from __future__ import division
import torch
import torch.nn as nn
import gc
import time
def cur_time():
return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
def adjust_learning_rate(optimizer, base_lr, epoch, stepsize, gamma=0.1):
# decay learning rate by 'gamma' for every 'stepsize'
lr = base_lr * (gamma ** (epoch // stepsize))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def set_bn_to_eval(m):
# 1. no update for running mean and var
# 2. scale and shift parameters are still trainable
classname = m.__class__.__name__
if classname.find('BatchNorm') != -1:
m.eval()
def set_wd(optim, num):
assert isinstance(num, (int, float)), '{} is not int or float'.format(num)
for group in optim.param_groups:
if group['weight_decay'] != num:
group['weight_decay'] = num
def count_num_param(model):
num_param = sum(p.numel() for p in model.parameters()) / 1e+06
if hasattr(model, 'classifier') and isinstance(model.classifier, nn.Module):
# we ignore the classifier because it is unused at test time
num_param -= sum(p.numel() for p in model.classifier.parameters()) / 1e+06
return num_param
def flip_tensor(x, dim):
indices = [slice(None)] * x.dim()
indices[dim] = torch.arange(x.size(dim) - 1, -1, -1,
dtype=torch.long, device=x.device)
return x[tuple(indices)]
def weights_init_kaiming(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif classname.find('Conv') != -1:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif classname.find('BatchNorm') != -1:
if m.affine:
nn.init.normal_(m.weight, 1.0, 0.001)
nn.init.constant_(m.bias, 0.0)
def weights_init_xavier(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.xavier_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif classname.find('Conv') != -1:
nn.init.xavier_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif classname.find('BatchNorm') != -1:
if m.affine:
nn.init.normal_(m.weight, 1.0, 0.001)
nn.init.constant_(m.bias, 0.0)
def weights_init_classifier(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.normal_(m.weight.data, std=0.001)
if m.bias is not None:
nn.init.constant_(m.bias.data, 0.0)
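def _example_mem_report():
    # Minimal usage sketch for the mem_report() helper defined just below, assuming
    # nothing beyond this module's own imports: allocate a CPU tensor (and a CUDA
    # copy when a GPU is available) and print the per-device storage report.
    cpu_tensor = torch.randn(1024, 1024)   # ~4 MB of float32 storage on the CPU
    gpu_tensor = cpu_tensor.cuda() if torch.cuda.is_available() else None
    mem_report()
    return cpu_tensor, gpu_tensor          # keep references alive for gc.get_objects()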
def mem_report():
"""Report the memory usage of the tensor.storage in pytorch
Both on CPUs and GPUs are reported"""
def _mem_report(tensors, mem_type):
        '''Print the selected tensors of the given storage type.
        There are two major storage types of concern:
        - GPU: tensors transferred to CUDA devices
        - CPU: tensors remaining in system memory (usually unimportant)
        Args:
        - tensors: the tensors of the specified type
        - mem_type: 'CPU' or 'GPU' in the current implementation '''
print('Storage on %s' %(mem_type))
print('-'*LEN)
total_numel = 0
total_mem = 0
visited_data = []
for tensor in tensors:
if tensor.is_sparse:
continue
# a data_ptr indicates a memory block allocated
data_ptr = tensor.storage().data_ptr()
if data_ptr in visited_data:
continue
visited_data.append(data_ptr)
numel = tensor.storage().size()
total_numel += numel
element_size = tensor.storage().element_size()
mem = numel*element_size /1024/1024 # 32bit=4Byte, MByte
total_mem += mem
element_type = type(tensor).__name__
size = tuple(tensor.size())
print('%s\t\t%s\t\t%.2f' % (
element_type,
size,
mem) )
print('-'*LEN)
print('Total Tensors: %d \tUsed Memory Space: %.2f MBytes' % (total_numel, total_mem) )
print('-'*LEN)
LEN = 65
print('='*LEN)
objects = gc.get_objects()
print('%s\t%s\t\t\t%s' %('Element type', 'Size', 'Used MEM(MBytes)') )
tensors = [obj for obj in objects if torch.is_tensor(obj)]
cuda_tensors = [t for t in tensors if t.is_cuda]
host_tensors = [t for t in tensors if not t.is_cuda]
_mem_report(cuda_tensors, 'GPU')
_mem_report(host_tensors, 'CPU')
print('='*LEN) | [
"torch.nn.init.constant_",
"torch.is_tensor",
"torch.nn.init.kaiming_normal_",
"torch.nn.init.normal_",
"torch.nn.init.xavier_normal_"
] | 1.2.0 | Shengyuan-Z/AGRL.pytorch | 6107fe0e4df5c8048a65f811bab46d2fb4624783 |
1.7 | import sys
import torch
import argparse
from datetime import timedelta
import logging
sys.path.append("../")
sys.path.append("../../")
sys.path.append("../../../")
sys.path.append("../../../../")
sys.path.append("../../../../../")
from experiments.utils import evaluate_mnist_uncertainty
from src.data import *
from src.trainer import Trainer
from src.models import ModelFactory
from src.losses import LOSS_FACTORY
import src.utils as utils
parser = argparse.ArgumentParser("mnist_classifier")
parser.add_argument('--task', type=str, default='classification', help='the main task; defines loss')
parser.add_argument('--model', type=str, default='conv_lenet_bbb', help='the model that we want to train')
parser.add_argument('--learning_rate', type=float,
default=0.001, help='init learning rate')
parser.add_argument('--loss_scaling', type=str,
default='batch', help='smoothing factor')
parser.add_argument('--weight_decay', type=float,
default=0.0, help='weight decay')
parser.add_argument('--data', type=str, default='./../../../data/',
help='location of the data corpus')
parser.add_argument('--dataset', type=str, default='mnist',
help='dataset')
parser.add_argument('--batch_size', type=int, default=256, help='batch size')
parser.add_argument('--valid_portion', type=float,
default=0.1, help='portion of training data')
parser.add_argument('--gamma', type=float,
default=.1, help='portion of training data')
parser.add_argument('--sigma_prior', type=float,
default=.1, help='portion of training data')
parser.add_argument('--epochs', type=int, default=100,
help='num of training epochs')
parser.add_argument('--input_size', nargs='+',
default=[1, 1, 28, 28], help='input size')
parser.add_argument('--output_size', type=int,
default=10, help='output size')
parser.add_argument('--samples', type=int,
default=20, help='output size')
parser.add_argument('--save', type=str, default='EXP', help='experiment name')
parser.add_argument('--save_last', action='store_true', default=True,
help='whether to just save the last model')
parser.add_argument('--num_workers', type=int,
default=16, help='number of workers')
parser.add_argument('--seed', type=int, default=1, help='random seed')
parser.add_argument('--debug', action='store_true', help='whether we are currently debugging')
parser.add_argument('--report_freq', type=float,
default=50, help='report frequency')
parser.add_argument('--gpu', type=int, default = 0, help='gpu device ids')
parser.add_argument('--q', action='store_true',
help='whether to do post training quantisation')
parser.add_argument('--at', action='store_true',
help='whether to do training aware quantisation')
def main():
args = parser.parse_args()
load = False
if args.save!='EXP':
load=True
args, writer = utils.parse_args(args)
logging.info('# Start Re-training #')
criterion = LOSS_FACTORY[args.task](args, args.loss_scaling)
model_temp = ModelFactory.get_model
logging.info('## Downloading and preparing data ##')
train_loader, valid_loader= get_train_loaders(args)
if not load:
model= model_temp(args.model, args.input_size, args.output_size, args.at, args)
logging.info('## Model created: ##')
logging.info(model.__repr__())
logging.info('### Loading model to parallel GPUs ###')
model = utils.model_to_gpus(model, args)
logging.info('### Preparing schedulers and optimizers ###')
optimizer = torch.optim.Adam(
model.parameters(),
args.learning_rate,
weight_decay = args.weight_decay)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, args.epochs)
logging.info('## Downloading and preparing data ##')
train_loader, valid_loader= get_train_loaders(args)
logging.info('## Beginning Training ##')
train = Trainer(model, criterion, optimizer, scheduler, args)
best_error, train_time, val_time = train.train_loop(
train_loader, valid_loader, writer)
logging.info('## Finished training, the best observed validation error: {}, total training time: {}, total validation time: {} ##'.format(
best_error, timedelta(seconds=train_time), timedelta(seconds=val_time)))
logging.info('## Beginning Plotting ##')
del model
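    # Rebuild the network for evaluation (optionally post-training quantised via
    # args.q), reload the weights saved during training, and run the uncertainty
    # evaluation with gradient tracking disabled.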
with torch.no_grad():
model = model_temp(args.model, args.input_size, args.output_size, args.q, args)
utils.load_model(model, args.save+"/weights.pt")
logging.info('## Model re-created: ##')
logging.info(model.__repr__())
model = utils.model_to_gpus(model, args)
model.eval()
evaluate_mnist_uncertainty(model, args)
logging.info('# Finished #')
if __name__ == '__main__':
main()
| [
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.no_grad"
] | 1.7.0 | tjiagoM/quantised-bayesian-nets | c6ff1db376c366633afa2845b7527cc144ffd3b2 |
1.6 | """Modules containing pytorch classes to fit 3D meshes to images using differentiable rendering."""
import copy
import numpy as np
import scipy.sparse.linalg
import scipy.spatial.transform.rotation
import torch
from . import CameraPytorch, LaplacianRigidEnergyPytorch, Scene3DPytorch
from .triangulated_mesh_pytorch import ColoredTriMeshPytorch as ColoredTriMesh
from .triangulated_mesh_pytorch import TriMeshPytorch as TriMesh
from .. import LaplacianRigidEnergy
def print_grad(name):
# to visualize the gradient of a variable use
# variable_name.register_hook(print_grad('variable_name'))
def hook(grad):
print(f"grad {name} = {grad}")
return hook
def qrot(q, v):
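    # Rotate each row of v by the quaternion q = (x, y, z, w) (scalar part last):
    #   v' = v + 2 * (w * (q_vec x v) + q_vec x (q_vec x v))
    # which is the standard expansion of q v q* for a unit quaternion.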
qr = q[None, :].repeat(v.shape[0], 1)
qvec = qr[:, :-1]
uv = torch.cross(qvec, v, dim=1)
uuv = torch.cross(qvec, uv, dim=1)
return v + 2 * (qr[:, [3]] * uv + uuv)
class MeshDepthFitterEnergy(torch.nn.Module):
"""Pytorch module to fit a deformable mesh to a depth image."""
def __init__(self, vertices, faces, euler_init, translation_init, cregu=2000):
super(MeshDepthFitterEnergy, self).__init__()
self.mesh = TriMesh(
faces[:, ::-1].copy(), vertices
) # we do a copy to avoid negative stride not supported by pytorch
object_center = vertices.mean(axis=0)
object_radius = np.max(np.std(vertices, axis=0))
self.camera_center = object_center + np.array([-0.5, 0, 5]) * object_radius
self.scene = Scene3DPytorch()
self.scene.set_mesh(self.mesh)
self.rigid_energy = LaplacianRigidEnergyPytorch(self.mesh, vertices, cregu)
self.Vinit = copy.copy(self.mesh.vertices)
self.Hfactorized = None
self.Hpreconditioner = None
self.transform_quaternion_init = scipy.spatial.transform.Rotation.from_euler(
"zyx", euler_init
).as_quat()
self.transform_translation_init = translation_init
self._vertices = torch.nn.Parameter(
torch.tensor(self.Vinit, dtype=torch.float64)
)
self.quaternion = torch.nn.Parameter(
torch.tensor(self.transform_quaternion_init, dtype=torch.float64)
)
self.translation = torch.nn.Parameter(
torch.tensor(self.transform_translation_init, dtype=torch.float64)
)
def set_max_depth(self, max_depth):
self.scene.max_depth = max_depth
self.scene.set_background(
np.full((self.height, self.width, 1), max_depth, dtype=np.float)
)
def set_depth_scale(self, depth_scale):
self.depthScale = depth_scale
def set_image(self, hand_image, focal=None, distortion=None):
self.width = hand_image.shape[1]
self.height = hand_image.shape[0]
assert hand_image.ndim == 2
self.hand_image = hand_image
if focal is None:
focal = 2 * self.width
rot = np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]])
t = -rot.T.dot(self.camera_center)
intrinsic = np.array(
[[focal, 0, self.width / 2], [0, focal, self.height / 2], [0, 0, 1]]
)
extrinsic = np.column_stack((rot, t))
self.camera = CameraPytorch(
extrinsic=extrinsic, intrinsic=intrinsic, distortion=distortion
)
self.iter = 0
def forward(self):
q_normalized = self.quaternion / self.quaternion.norm()
print(self.quaternion.norm())
vertices_centered = self._vertices - torch.mean(self._vertices, dim=0)[None, :]
v_transformed = qrot(q_normalized, vertices_centered) + self.translation
self.mesh.set_vertices(v_transformed)
depth_scale = 1 * self.depthScale
depth = self.scene.render_depth(
            self.camera,
width=self.width,
height=self.height,
depth_scale=depth_scale,
)
depth = torch.clamp(depth, 0, self.scene.max_depth)
diff_image = torch.sum(
(depth - torch.tensor(self.hand_image[:, :, None])) ** 2, dim=2
)
self.depth = depth
self.diff_image = diff_image
energy_data = torch.sum(diff_image)
energy_rigid = self.rigid_energy.evaluate(
self._vertices, return_grad=False, return_hessian=False
)
energy = energy_data + energy_rigid
self.loss = energy_data + energy_rigid
print("Energy=%f : EData=%f E_rigid=%f" % (energy, energy_data, energy_rigid))
return self.loss
class MeshDepthFitterPytorchOptim:
"""Pytorch optimizer to fit a deformable mesh to an image."""
def __init__(
self, vertices, faces, euler_init, translation_init, cregu=2000, lr=0.8
):
self.energy = MeshDepthFitterEnergy(
vertices, faces, euler_init, translation_init, cregu
)
params = self.energy.parameters()
        self.optimizer = torch.optim.LBFGS(params, lr=lr, max_iter=1)
# self.optimizer = torch.optim.SGD(params, lr=0.000005, momentum=0.1,
# dampening=0.1 )
# self.optimizer =torch.optim.RMSprop(params, lr=1e-3, alpha=0.99, eps=1e-8,
# weight_decay=0, momentum=0.001)
# self.optimizer = torch.optim.Adadelta(params, lr=0.1, rho=0.95,
# eps=1e-6, weight_decay=0)
# self.optimizer = torch.optim.Adagrad(self.energy.parameters(), lr=0.02)
def set_image(self, depth_image, focal):
self.energy.set_image(depth_image, focal=focal)
def set_max_depth(self, max_depth):
self.energy.set_max_depth(max_depth)
def set_depth_scale(self, depth_scale):
self.energy.set_depth_scale(depth_scale)
def step(self):
def closure():
self.optimizer.zero_grad()
loss = self.energy()
loss.backward()
return loss
self.optimizer.step(closure)
# self.iter += 1
return (
self.energy.loss,
            self.energy.depth[:, :, 0].detach().numpy(),
            self.energy.diff_image.detach().numpy(),
)
class MeshDepthFitter:
"""Class to fit a deformable mesh to a depth image."""
def __init__(
self,
vertices,
faces,
euler_init,
translation_init,
cregu=2000,
inertia=0.96,
damping=0.05,
):
self.cregu = cregu
self.inertia = inertia
self.damping = damping
self.step_factor_vertices = 0.0005
self.step_max_vertices = 0.5
self.step_factor_quaternion = 0.00006
self.step_max_quaternion = 0.1
self.step_factor_translation = 0.00005
self.step_max_translation = 0.1
self.mesh = TriMesh(
faces.copy()
        ) # we do a copy to avoid negative strides, which are not supported by pytorch
object_center = vertices.mean(axis=0) + translation_init
object_radius = np.max(np.std(vertices, axis=0))
self.camera_center = object_center + np.array([-0.5, 0, 5]) * object_radius
self.scene = Scene3DPytorch()
self.scene.set_mesh(self.mesh)
self.rigid_energy = LaplacianRigidEnergy(self.mesh, vertices, cregu)
self.vertices_init = torch.tensor(copy.copy(vertices))
self.Hfactorized = None
self.Hpreconditioner = None
self.set_mesh_transform_init(euler=euler_init, translation=translation_init)
self.reset()
def set_mesh_transform_init(self, euler, translation):
self.transform_quaternion_init = scipy.spatial.transform.Rotation.from_euler(
"zyx", euler
).as_quat()
self.transform_translation_init = translation
def reset(self):
self.vertices = copy.copy(self.vertices_init)
self.speed_vertices = np.zeros(self.vertices_init.shape)
self.transform_quaternion = copy.copy(self.transform_quaternion_init)
self.transform_translation = copy.copy(self.transform_translation_init)
self.speed_translation = np.zeros(3)
self.speed_quaternion = np.zeros(4)
def set_max_depth(self, max_depth):
self.scene.max_depth = max_depth
self.scene.set_background(
np.full((self.height, self.width, 1), max_depth, dtype=np.float)
)
def set_depth_scale(self, depth_scale):
self.depthScale = depth_scale
def set_image(self, hand_image, focal=None, distortion=None):
self.width = hand_image.shape[1]
self.height = hand_image.shape[0]
assert hand_image.ndim == 2
self.hand_image = hand_image
if focal is None:
focal = 2 * self.width
rot = np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]])
trans = -rot.T.dot(self.camera_center)
intrinsic = np.array(
[[focal, 0, self.width / 2], [0, focal, self.height / 2], [0, 0, 1]]
)
extrinsic = np.column_stack((rot, trans))
self.camera = CameraPytorch(
extrinsic=extrinsic,
intrinsic=intrinsic,
width=self.width,
height=self.height,
distortion=distortion,
)
self.iter = 0
def step(self):
self.vertices = self.vertices - torch.mean(self.vertices, dim=0)[None, :]
# vertices_with_grad = self.vertices.clone().requires_grad(True)
vertices_with_grad = self.vertices.clone().detach().requires_grad_(True)
vertices_with_grad_centered = (
vertices_with_grad - torch.mean(vertices_with_grad, dim=0)[None, :]
)
quaternion_with_grad = torch.tensor(
self.transform_quaternion, dtype=torch.float64, requires_grad=True
)
translation_with_grad = torch.tensor(
self.transform_translation, dtype=torch.float64, requires_grad=True
)
q_normalized = (
quaternion_with_grad / quaternion_with_grad.norm()
        ) # this leads to a gradient that lies in the tangent space
vertices_with_grad_transformed = (
qrot(q_normalized, vertices_with_grad_centered) + translation_with_grad
)
self.mesh.set_vertices(vertices_with_grad_transformed)
depth_scale = 1 * self.depthScale
depth = self.scene.render_depth(
self.camera, width=self.width, height=self.height, depth_scale=depth_scale
)
depth = torch.clamp(depth, 0, self.scene.max_depth)
diff_image = torch.sum(
(depth - torch.tensor(self.hand_image[:, :, None])) ** 2, dim=2
)
loss = torch.sum(diff_image)
loss.backward()
energy_data = loss.detach().numpy()
grad_data = vertices_with_grad.grad.numpy()
(
energy_rigid,
grad_rigidity,
approx_hessian_rigidity,
) = self.rigid_energy.evaluate(self.vertices.numpy())
energy = energy_data + energy_rigid
print("Energy=%f : EData=%f E_rigid=%f" % (energy, energy_data, energy_rigid))
# update v
grad = grad_data + grad_rigidity
def mult_and_clamp(x, a, t):
return np.minimum(np.maximum(x * a, -t), t)
# update vertices
step_vertices = mult_and_clamp(
-grad, self.step_factor_vertices, self.step_max_vertices
)
self.speed_vertices = (1 - self.damping) * (
self.speed_vertices * self.inertia + (1 - self.inertia) * step_vertices
)
self.vertices = self.vertices + torch.tensor(self.speed_vertices)
# update rotation
step_quaternion = mult_and_clamp(
-quaternion_with_grad.grad.numpy(),
self.step_factor_quaternion,
self.step_max_quaternion,
)
self.speed_quaternion = (1 - self.damping) * (
self.speed_quaternion * self.inertia + (1 - self.inertia) * step_quaternion
)
self.transform_quaternion = self.transform_quaternion + self.speed_quaternion
self.transform_quaternion = self.transform_quaternion / np.linalg.norm(
self.transform_quaternion
)
# update translation
step_translation = mult_and_clamp(
-translation_with_grad.grad.numpy(),
self.step_factor_translation,
self.step_max_translation,
)
self.speed_translation = (1 - self.damping) * (
self.speed_translation * self.inertia
+ (1 - self.inertia) * step_translation
)
self.transform_translation = self.transform_translation + self.speed_translation
self.iter += 1
return energy, depth[:, :, 0].detach().numpy(), diff_image.detach().numpy()
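def _example_depth_fitting(vertices, faces, depth_image, n_iter=50):
    # Minimal usage sketch for MeshDepthFitter, assuming `vertices` (N x 3) and
    # `faces` (M x 3) are numpy arrays and `depth_image` is a 2-D numpy array;
    # the focal length, maximum depth and depth scale are illustrative values only.
    fitter = MeshDepthFitter(vertices, faces,
                             euler_init=np.array([0.1, 0.0, 0.0]),
                             translation_init=np.zeros(3))
    fitter.set_image(depth_image, focal=600)
    fitter.set_max_depth(450)
    fitter.set_depth_scale(100 / 450)
    for _ in range(n_iter):
        energy, rendered_depth, diff_image = fitter.step()
    return energy, rendered_depth, diff_image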
class MeshRGBFitterWithPose:
"""Class to fit a deformable mesh to a color image."""
def __init__(
self,
vertices,
faces,
euler_init,
translation_init,
default_color,
default_light,
cregu=2000,
inertia=0.96,
damping=0.05,
update_lights=True,
update_color=True,
):
self.cregu = cregu
self.inertia = inertia
self.damping = damping
self.step_factor_vertices = 0.0005
self.step_max_vertices = 0.5
self.step_factor_quaternion = 0.00006
self.step_max_quaternion = 0.05
self.step_factor_translation = 0.00005
self.step_max_translation = 0.1
self.default_color = default_color
self.default_light = default_light
self.update_lights = update_lights
self.update_color = update_color
self.mesh = ColoredTriMesh(
faces.copy()
        ) # we do a copy to avoid negative strides, which are not supported by pytorch
object_center = vertices.mean(axis=0) + translation_init
object_radius = np.max(np.std(vertices, axis=0))
self.camera_center = object_center + np.array([0, 0, 9]) * object_radius
self.scene = Scene3DPytorch()
self.scene.set_mesh(self.mesh)
self.rigid_energy = LaplacianRigidEnergyPytorch(self.mesh, vertices, cregu)
self.vertices_init = torch.tensor(copy.copy(vertices))
self.Hfactorized = None
self.Hpreconditioner = None
self.set_mesh_transform_init(euler=euler_init, translation=translation_init)
self.reset()
def set_background_color(self, background_color):
self.scene.set_background(
np.tile(background_color[None, None, :], (self.height, self.width, 1))
)
def set_mesh_transform_init(self, euler, translation):
self.transform_quaternion_init = scipy.spatial.transform.Rotation.from_euler(
"zyx", euler
).as_quat()
self.transform_translation_init = translation
def reset(self):
self.vertices = copy.copy(self.vertices_init)
self.speed_vertices = np.zeros(self.vertices.shape)
self.transform_quaternion = copy.copy(self.transform_quaternion_init)
self.transform_translation = copy.copy(self.transform_translation_init)
self.speed_translation = np.zeros(3)
self.speed_quaternion = np.zeros(4)
self.hand_color = copy.copy(self.default_color)
self.light_directional = copy.copy(self.default_light["directional"])
self.light_ambient = copy.copy(self.default_light["ambient"])
self.speed_light_directional = np.zeros(self.light_directional.shape)
self.speed_light_ambient = np.zeros(self.light_ambient.shape)
self.speed_hand_color = np.zeros(self.hand_color.shape)
def set_image(self, hand_image, focal=None, distortion=None):
self.width = hand_image.shape[1]
self.height = hand_image.shape[0]
assert hand_image.ndim == 3
self.hand_image = hand_image
if focal is None:
focal = 2 * self.width
rot = np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]])
trans = -rot.T.dot(self.camera_center)
intrinsic = np.array(
[[focal, 0, self.width / 2], [0, focal, self.height / 2], [0, 0, 1]]
)
extrinsic = np.column_stack((rot, trans))
self.camera = CameraPytorch(
extrinsic=extrinsic,
intrinsic=intrinsic,
width=self.width,
height=self.height,
distortion=distortion,
)
self.iter = 0
def step(self):
self.vertices = self.vertices - torch.mean(self.vertices, dim=0)[None, :]
vertices_with_grad = self.vertices.clone().detach().requires_grad_(True)
vertices_with_grad_centered = (
vertices_with_grad - torch.mean(vertices_with_grad, dim=0)[None, :]
)
quaternion_with_grad = torch.tensor(
self.transform_quaternion, dtype=torch.float64, requires_grad=True
)
translation_with_grad = torch.tensor(
self.transform_translation, dtype=torch.float64, requires_grad=True
)
light_directional_with_grad = torch.tensor(
self.light_directional, dtype=torch.float64, requires_grad=True
)
light_ambient_with_grad = torch.tensor(
self.light_ambient, dtype=torch.float64, requires_grad=True
)
hand_color_with_grad = torch.tensor(
self.hand_color, dtype=torch.float64, requires_grad=True
)
q_normalized = (
quaternion_with_grad / quaternion_with_grad.norm()
        ) # this leads to a gradient that lies in the tangent space
vertices_with_grad_transformed = (
qrot(q_normalized, vertices_with_grad_centered) + translation_with_grad
)
self.mesh.set_vertices(vertices_with_grad_transformed)
self.scene.set_light(
light_directional=light_directional_with_grad,
light_ambient=light_ambient_with_grad,
)
self.mesh.set_vertices_colors(
hand_color_with_grad.repeat([self.mesh.nb_vertices, 1])
)
image = self.scene.render(self.camera)
diff_image = torch.sum((image - torch.tensor(self.hand_image)) ** 2, dim=2)
loss = torch.sum(diff_image)
loss.backward()
energy_data = loss.detach().numpy()
grad_data = vertices_with_grad.grad
(
energy_rigid,
grad_rigidity,
approx_hessian_rigidity,
) = self.rigid_energy.evaluate(self.vertices)
energy = energy_data + energy_rigid.numpy()
print("Energy=%f : EData=%f E_rigid=%f" % (energy, energy_data, energy_rigid))
# update v
grad = grad_data + grad_rigidity
def mult_and_clamp(x, a, t):
return np.minimum(np.maximum(x * a, -t), t)
inertia = self.inertia
# update vertices
step_vertices = mult_and_clamp(
-grad.numpy(), self.step_factor_vertices, self.step_max_vertices
)
self.speed_vertices = (1 - self.damping) * (
self.speed_vertices * inertia + (1 - inertia) * step_vertices
)
self.vertices = self.vertices + torch.tensor(self.speed_vertices)
# update rotation
step_quaternion = mult_and_clamp(
-quaternion_with_grad.grad.numpy(),
self.step_factor_quaternion,
self.step_max_quaternion,
)
self.speed_quaternion = (1 - self.damping) * (
self.speed_quaternion * inertia + (1 - inertia) * step_quaternion
)
self.transform_quaternion = self.transform_quaternion + self.speed_quaternion
self.transform_quaternion = self.transform_quaternion / np.linalg.norm(
self.transform_quaternion
)
# update translation
step_translation = mult_and_clamp(
-translation_with_grad.grad.numpy(),
self.step_factor_translation,
self.step_max_translation,
)
self.speed_translation = (1 - self.damping) * (
self.speed_translation * inertia + (1 - inertia) * step_translation
)
self.transform_translation = self.transform_translation + self.speed_translation
# update directional light
step = -light_directional_with_grad.grad.numpy() * 0.0001
self.speed_light_directional = (1 - self.damping) * (
self.speed_light_directional * inertia + (1 - inertia) * step
)
self.light_directional = self.light_directional + self.speed_light_directional
# update ambient light
step = -light_ambient_with_grad.grad.numpy() * 0.0001
self.speed_light_ambient = (1 - self.damping) * (
self.speed_light_ambient * inertia + (1 - inertia) * step
)
self.light_ambient = self.light_ambient + self.speed_light_ambient
# update hand color
step = -hand_color_with_grad.grad.numpy() * 0.00001
self.speed_hand_color = (1 - self.damping) * (
self.speed_hand_color * inertia + (1 - inertia) * step
)
self.hand_color = self.hand_color + self.speed_hand_color
self.iter += 1
return energy, image.detach().numpy(), diff_image.detach().numpy()
| [
"torch.clamp",
"torch.tensor",
"torch.cross",
"torch.optim.LBFGS",
"torch.mean",
"torch.sum"
] | 1.6.0 | synapticarbors/DEODR | e67f1792de90669b9adbf1a8103a9ca3b2c2c3dc |
1.6 | import numpy as np
from .downloader import load_trained_model
from ..parse_base import BaseParser, BaseInputExample
from .spacy_extensions import ConstituentData, NonConstituentException
import torch
class PartialConstituentData:
def __init__(self):
self.starts = [np.array([], dtype=int)]
self.ends = [np.array([], dtype=int)]
self.labels = [np.array([], dtype=int)]
def finalize(self, doc, label_vocab):
self.starts = np.hstack(self.starts)
self.ends = np.hstack(self.ends)
self.labels = np.hstack(self.labels)
# TODO(nikita): Python for loops aren't very fast
loc_to_constituent = np.full(len(doc), -1, dtype=int)
prev = None
for position in range(self.starts.shape[0]):
if self.starts[position] != prev:
prev = self.starts[position]
loc_to_constituent[self.starts[position]] = position
return ConstituentData(
self.starts, self.ends, self.labels, loc_to_constituent, label_vocab
).serialize()
class SentenceWrapper(BaseInputExample):
TEXT_NORMALIZATION_MAPPING = {
"`": "'",
"«": '"',
"»": '"',
"‘": "'",
"’": "'",
"“": '"',
"”": '"',
"„": '"',
"‹": "'",
"›": "'",
"—": "--", # em dash
}
def __init__(self, spacy_sent):
self.sent = spacy_sent
@property
def words(self):
return [
self.TEXT_NORMALIZATION_MAPPING.get(token.text, token.text)
for token in self.sent
]
@property
def space_after(self):
return [bool(token.whitespace_) for token in self.sent]
@property
def tree(self):
return None
def leaves(self):
return self.words
def pos(self):
return [(word, "UNK") for word in self.words]
class BeneparComponent:
"""
Berkeley Neural Parser (benepar) component for spaCy.
Sample usage:
>>> nlp = spacy.load('en_core_web_md')
>>> if spacy.__version__.startswith('2'):
nlp.add_pipe(BeneparComponent("benepar_en3"))
else:
nlp.add_pipe("benepar", config={"model": "benepar_en3"})
>>> doc = nlp("The quick brown fox jumps over the lazy dog.")
>>> sent = list(doc.sents)[0]
>>> print(sent._.parse_string)
This component is only responsible for constituency parsing and (for some
trained models) part-of-speech tagging. It should be preceded in the
pipeline by other components that can, at minimum, perform tokenization and
sentence segmentation.
"""
name = "benepar"
def __init__(
self,
name,
subbatch_max_tokens=500,
disable_tagger=False,
batch_size="ignored",
):
"""Load a trained parser model.
Args:
name (str): Model name, or path to pytorch saved model
subbatch_max_tokens (int): Maximum number of tokens to process in
each batch
disable_tagger (bool, default False): Unless disabled, the parser
will set predicted part-of-speech tags for the document,
overwriting any existing tags provided by spaCy models or
previous pipeline steps. This option has no effect for parser
models that do not have a part-of-speech tagger built in.
batch_size: deprecated and ignored; use subbatch_max_tokens instead
"""
self._parser = load_trained_model(name)
if torch.cuda.is_available():
self._parser.cuda()
self.subbatch_max_tokens = subbatch_max_tokens
self.disable_tagger = disable_tagger
self._label_vocab = self._parser.config["label_vocab"]
label_vocab_size = max(self._label_vocab.values()) + 1
self._label_from_index = [()] * label_vocab_size
for label, i in self._label_vocab.items():
if label:
self._label_from_index[i] = tuple(label.split("::"))
else:
self._label_from_index[i] = ()
self._label_from_index = tuple(self._label_from_index)
if not self.disable_tagger:
tag_vocab = self._parser.config["tag_vocab"]
tag_vocab_size = max(tag_vocab.values()) + 1
self._tag_from_index = [()] * tag_vocab_size
for tag, i in tag_vocab.items():
self._tag_from_index[i] = tag
self._tag_from_index = tuple(self._tag_from_index)
else:
self._tag_from_index = None
def __call__(self, doc):
"""Update the input document with predicted constituency parses."""
# TODO(https://github.com/nikitakit/self-attentive-parser/issues/16): handle
# tokens that consist entirely of whitespace.
constituent_data = PartialConstituentData()
wrapped_sents = [SentenceWrapper(sent) for sent in doc.sents]
for sent, parse in zip(
doc.sents,
self._parser.parse(
wrapped_sents,
return_compressed=True,
subbatch_max_tokens=self.subbatch_max_tokens,
),
):
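            # Parse spans are sentence-relative; shift them by sent.start so that
            # they index tokens of the full Doc.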
constituent_data.starts.append(parse.starts + sent.start)
constituent_data.ends.append(parse.ends + sent.start)
constituent_data.labels.append(parse.labels)
if parse.tags is not None and not self.disable_tagger:
for i, tag_id in enumerate(parse.tags):
sent[i].tag_ = self._tag_from_index[tag_id]
doc._._constituent_data = constituent_data.finalize(doc, self._label_from_index)
return doc
def create_benepar_component(
nlp,
name,
model: str,
subbatch_max_tokens: int,
disable_tagger: bool,
):
return BeneparComponent(
model,
subbatch_max_tokens=subbatch_max_tokens,
disable_tagger=disable_tagger,
)
def register_benepar_component_factory():
# Starting with spaCy 3.0, nlp.add_pipe no longer directly accepts
# BeneparComponent instances. We must instead register a component factory.
import spacy
if spacy.__version__.startswith("2"):
return
from spacy.language import Language
Language.factory(
"benepar",
default_config={
"subbatch_max_tokens": 500,
"disable_tagger": False,
},
func=create_benepar_component,
)
try:
register_benepar_component_factory()
except ImportError:
pass
| [
"torch.cuda.is_available"
] | 1.6.0 | boehm-e/self-attentive-parser | 24a50b529d38cc182082e4e72bbf79d1b24ec1da |
1.9 | """
Tests ideas are taken mostly from https://github.com/dalab/hyperbolic_nn/blob/master/util.py with some changes
"""
import torch
import random
import numpy as np
import pytest
import warnings
import itertools
import geoopt
from geoopt.manifolds import stereographic
@pytest.fixture(scope="function", autouse=True, params=range(30, 40))
def seed(request):
seed = request.param
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
return seed
@pytest.fixture(
scope="function", params=[torch.float64, torch.float32], ids=["float64", "float32"]
)
def dtype(request):
return request.param
def tolerant_allclose_check(a, b, strict=True, **tolerance):
if strict:
np.testing.assert_allclose(a.detach(), b.detach(), **tolerance)
else:
try:
np.testing.assert_allclose(a.detach(), b.detach(), **tolerance)
except AssertionError as e:
assert not torch.isnan(a).any(), "Found nans"
assert not torch.isnan(b).any(), "Found nans"
warnings.warn(
"Unstable numerics: " + " | ".join(str(e).splitlines()[3:6]),
RuntimeWarning,
)
@pytest.fixture(params=[True, False], ids=["negative", "positive"])
def negative(request):
return request.param
@pytest.fixture()
def strict(seed, dtype, negative):
return seed in {30, 31} and dtype == torch.float64 or negative
# c = -k
@pytest.fixture
def c(seed, dtype, negative):
# test broadcasted and non broadcasted versions
if seed == 30: # strict seed
c = torch.tensor(0.0).to(dtype)
elif seed == 31: # strict seed too
c = torch.tensor(1.0).to(dtype)
elif seed == 39:
c = 10 ** torch.arange(-15, 1, dtype=dtype)[:, None]
elif seed == 35:
c = torch.zeros(100, 1, dtype=dtype)
elif seed > 35:
c = torch.rand(100, 1, dtype=dtype)
else:
c = torch.tensor(random.random()).to(dtype)
if not negative:
c = -c
return c.requires_grad_(True)
@pytest.fixture
def k(c):
return -c
@pytest.fixture
def manifold(k):
return stereographic.Stereographic(k=k, learnable=True)
@pytest.fixture
def B(c):
if c.dim() > 1:
return c.shape[0]
else:
return 100
@pytest.fixture
def a(seed, c, manifold, B, dtype):
r = manifold.radius
a = torch.empty(B, 10, dtype=dtype).normal_(-1, 1)
a /= a.norm(dim=-1, keepdim=True)
a *= torch.where(torch.isfinite(r), r, torch.ones((), dtype=dtype)).clamp_max_(100)
a *= torch.rand_like(a)
return manifold.projx(a).detach().requires_grad_(True)
@pytest.fixture
def b(seed, c, manifold, B, dtype):
r = manifold.radius
a = torch.empty(B, 10, dtype=dtype).normal_(-1, 1)
a /= a.norm(dim=-1, keepdim=True)
a *= torch.where(torch.isfinite(r), r, torch.ones((), dtype=dtype)).clamp_max_(100)
a *= torch.rand_like(a)
return manifold.projx(a).detach().requires_grad_(True)
@pytest.fixture
def logunif_input(dtype):
inp = 10 ** torch.arange(-15, 1, dtype=dtype)
inp = torch.cat([-inp.flip(0), torch.zeros([1], dtype=dtype), inp])
return inp.requires_grad_(True)
def test_tanh_grad(logunif_input):
stereographic.math.tanh(logunif_input).sum().backward()
assert torch.isfinite(logunif_input.grad).all()
def test_artanh_grad(logunif_input):
stereographic.math.artanh(logunif_input).sum().backward()
assert torch.isfinite(logunif_input.grad).all()
def test_arsinh_grad(logunif_input):
stereographic.math.arsinh(logunif_input).sum().backward()
assert torch.isfinite(logunif_input.grad).all()
def test_tan_k_grad(logunif_input):
k = logunif_input.detach().clone().requires_grad_()
stereographic.math.tan_k(logunif_input[None], k[:, None]).sum().backward()
assert torch.isfinite(logunif_input.grad).all()
assert torch.isfinite(k.grad).all()
def test_artan_k_grad(logunif_input):
k = logunif_input.detach().clone().requires_grad_()
stereographic.math.artan_k(logunif_input[None], k[:, None]).sum().backward()
assert torch.isfinite(logunif_input.grad).all()
assert torch.isfinite(k.grad).all()
def test_arsin_k_grad(logunif_input):
k = logunif_input.detach().clone().requires_grad_()
stereographic.math.arsin_k(logunif_input[None], k[:, None]).sum().backward()
assert torch.isfinite(logunif_input.grad).all()
assert torch.isfinite(k.grad).all()
def test_sin_k_grad(logunif_input):
k = logunif_input.detach().clone().requires_grad_()
stereographic.math.sin_k(logunif_input[None], k[:, None]).sum().backward()
assert torch.isfinite(logunif_input.grad).all()
assert torch.isfinite(k.grad).all()
def test_project_k_grad(logunif_input):
vec = logunif_input[:, None] * torch.ones(logunif_input.shape[0], 10)
k = logunif_input.detach().clone().requires_grad_()
stereographic.math.project(vec, k=k[:, None]).sum().backward()
assert torch.isfinite(logunif_input.grad).all()
assert torch.isfinite(k.grad).all()
def test_mobius_addition_left_cancelation(a, b, manifold, dtype):
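    # Left cancellation law of Möbius addition: (-a) ⊕ (a ⊕ b) = b.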
res = manifold.mobius_add(-a, manifold.mobius_add(a, b))
tolerance = {torch.float32: dict(atol=5e-5, rtol=5e-4), torch.float64: dict()}
np.testing.assert_allclose(res.detach(), b.detach(), **tolerance[dtype])
res.sum().backward()
assert torch.isfinite(a.grad).all()
assert torch.isfinite(b.grad).all()
assert torch.isfinite(manifold.k.grad).all()
def test_mobius_addition_zero_a(b, manifold):
a = torch.zeros_like(b)
res = manifold.mobius_add(a, b)
np.testing.assert_allclose(res.detach(), b.detach())
res.sum().backward()
assert torch.isfinite(b.grad).all()
assert torch.isfinite(manifold.k.grad).all()
def test_mobius_addition_zero_b(a, c, manifold):
b = torch.zeros_like(a)
res = manifold.mobius_add(a, b)
np.testing.assert_allclose(res.detach(), a.detach())
res.sum().backward()
assert torch.isfinite(a.grad).all()
assert torch.isfinite(manifold.k.grad).all()
def test_mobius_addition_negative_cancellation(a, manifold, dtype):
res = manifold.mobius_add(a, -a)
tolerance = {
torch.float32: dict(atol=1e-4, rtol=1e-6),
torch.float64: dict(atol=1e-6),
}
np.testing.assert_allclose(res.detach(), torch.zeros_like(res), **tolerance[dtype])
res.sum().backward()
assert torch.isfinite(a.grad).all()
assert torch.isfinite(manifold.k.grad).all()
def test_mobius_negative_addition(a, b, manifold, dtype):
res = manifold.mobius_add(-b, -a)
res1 = -manifold.mobius_add(b, a)
tolerance = {
torch.float32: dict(atol=1e-7, rtol=1e-6),
torch.float64: dict(atol=1e-10),
}
np.testing.assert_allclose(res.detach(), res1.detach(), **tolerance[dtype])
res.sum().backward()
assert torch.isfinite(a.grad).all()
assert torch.isfinite(b.grad).all()
assert torch.isfinite(manifold.k.grad).all()
@pytest.mark.parametrize("n", list(range(5)))
def test_n_additions_via_scalar_multiplication(n, a, dtype, negative, manifold, strict):
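    # Adding a to itself n times with Möbius addition should match the Möbius
    # scalar multiplication n ⊗ a.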
n = torch.as_tensor(n, dtype=a.dtype).requires_grad_()
y = torch.zeros_like(a)
for _ in range(int(n.item())):
y = manifold.mobius_add(a, y)
ny = manifold.mobius_scalar_mul(n, a)
if negative:
tolerance = {
torch.float32: dict(atol=4e-5, rtol=1e-3),
torch.float64: dict(atol=1e-5, rtol=1e-3),
}
else:
tolerance = {
torch.float32: dict(atol=2e-6, rtol=1e-3),
torch.float64: dict(atol=1e-5, rtol=1e-3),
}
tolerant_allclose_check(y, ny, strict=strict, **tolerance[dtype])
ny.sum().backward()
assert torch.isfinite(n.grad).all()
assert torch.isfinite(a.grad).all()
assert torch.isfinite(manifold.k.grad).all()
@pytest.fixture
def r1(seed, dtype, B):
if seed % 3 == 0:
return (
torch.tensor(random.uniform(-1, 1), dtype=dtype)
.detach()
.requires_grad_(True)
)
else:
return (torch.rand(B, 1, dtype=dtype) * 2 - 1).detach().requires_grad_(True)
@pytest.fixture
def r2(seed, dtype, B):
if seed % 3 == 1:
return (
torch.tensor(random.uniform(-1, 1), dtype=dtype)
.detach()
.requires_grad_(True)
)
else:
return (torch.rand(B, 1, dtype=dtype) * 2 - 1).detach().requires_grad_(True)
def test_scalar_multiplication_distributive(a, r1, r2, manifold, dtype):
res = manifold.mobius_scalar_mul(r1 + r2, a)
res1 = manifold.mobius_add(
manifold.mobius_scalar_mul(r1, a),
manifold.mobius_scalar_mul(r2, a),
)
res2 = manifold.mobius_add(
manifold.mobius_scalar_mul(r1, a),
manifold.mobius_scalar_mul(r2, a),
)
tolerance = {
torch.float32: dict(atol=5e-6, rtol=1e-4),
torch.float64: dict(atol=1e-7, rtol=1e-4),
}
np.testing.assert_allclose(res1.detach(), res.detach(), **tolerance[dtype])
np.testing.assert_allclose(res2.detach(), res.detach(), **tolerance[dtype])
res.sum().backward()
assert torch.isfinite(a.grad).all()
assert torch.isfinite(r1.grad).all()
assert torch.isfinite(r2.grad).all()
assert torch.isfinite(manifold.k.grad).all()
def test_scalar_multiplication_associative(a, r1, r2, manifold, dtype):
res = manifold.mobius_scalar_mul(r1 * r2, a)
res1 = manifold.mobius_scalar_mul(r1, manifold.mobius_scalar_mul(r2, a))
res2 = manifold.mobius_scalar_mul(r2, manifold.mobius_scalar_mul(r1, a))
tolerance = {
torch.float32: dict(atol=1e-5, rtol=1e-5),
torch.float64: dict(atol=1e-7, rtol=1e-7),
}
np.testing.assert_allclose(res1.detach(), res.detach(), **tolerance[dtype])
np.testing.assert_allclose(res2.detach(), res.detach(), **tolerance[dtype])
res.sum().backward()
assert torch.isfinite(a.grad).all()
assert torch.isfinite(r1.grad).all()
assert torch.isfinite(r2.grad).all()
assert torch.isfinite(manifold.k.grad).all()
def test_scaling_property(a, r1, manifold, dtype):
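    # Möbius scalar multiplication preserves direction: |r| ⊗ a / ||r ⊗ a|| = a / ||a||.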
x1 = a / a.norm(dim=-1, keepdim=True)
ra = manifold.mobius_scalar_mul(r1, a)
x2 = manifold.mobius_scalar_mul(abs(r1), a) / ra.norm(dim=-1, keepdim=True)
tolerance = {
torch.float32: dict(rtol=1e-5, atol=1e-6),
torch.float64: dict(atol=1e-10),
}
np.testing.assert_allclose(x1.detach(), x2.detach(), **tolerance[dtype])
x2.sum().backward()
assert torch.isfinite(a.grad).all()
assert torch.isfinite(r1.grad).all()
assert torch.isfinite(manifold.k.grad).all()
def test_geodesic_borders(a, b, manifold, dtype):
geo0 = manifold.geodesic(torch.tensor(0.0, dtype=dtype), a, b)
geo1 = manifold.geodesic(torch.tensor(1.0, dtype=dtype), a, b)
tolerance = {
torch.float32: dict(rtol=1e-5, atol=5e-5),
torch.float64: dict(atol=1e-10),
}
np.testing.assert_allclose(geo0.detach(), a.detach(), **tolerance[dtype])
np.testing.assert_allclose(geo1.detach(), b.detach(), **tolerance[dtype])
(geo0 + geo1).sum().backward()
assert torch.isfinite(a.grad).all()
assert torch.isfinite(b.grad).all()
assert torch.isfinite(manifold.k.grad).all()
def test_geodesic_segment_length_property(a, b, manifold, dtype):
extra_dims = len(a.shape)
segments = 12
t = torch.linspace(0, 1, segments + 1, dtype=dtype).view(
(segments + 1,) + (1,) * extra_dims
)
gamma_ab_t = manifold.geodesic(t, a, b)
gamma_ab_t0 = gamma_ab_t[:-1]
gamma_ab_t1 = gamma_ab_t[1:]
dist_ab_t0mt1 = manifold.dist(gamma_ab_t0, gamma_ab_t1, keepdim=True)
speed = manifold.dist(a, b, keepdim=True).unsqueeze(0).expand_as(dist_ab_t0mt1)
# we have exactly 12 line segments
tolerance = {
torch.float32: dict(rtol=1e-5, atol=5e-3),
torch.float64: dict(rtol=1e-5, atol=5e-3),
}
length = speed / segments
np.testing.assert_allclose(
dist_ab_t0mt1.detach(), length.detach(), **tolerance[dtype]
)
(length + dist_ab_t0mt1).sum().backward()
assert torch.isfinite(a.grad).all()
assert torch.isfinite(b.grad).all()
assert torch.isfinite(manifold.k.grad).all()
def test_geodesic_segement_unit_property(a, b, manifold, dtype):
extra_dims = len(a.shape)
segments = 12
t = torch.linspace(0, 1, segments + 1, dtype=dtype).view(
(segments + 1,) + (1,) * extra_dims
)
gamma_ab_t = manifold.geodesic_unit(t, a, b)
gamma_ab_t0 = gamma_ab_t[:1]
gamma_ab_t1 = gamma_ab_t
dist_ab_t0mt1 = manifold.dist(gamma_ab_t0, gamma_ab_t1, keepdim=True)
true_distance_travelled = t.expand_as(dist_ab_t0mt1)
# we have exactly 12 line segments
tolerance = {
torch.float32: dict(atol=2e-4, rtol=5e-5),
torch.float64: dict(atol=1e-10),
}
np.testing.assert_allclose(
dist_ab_t0mt1.detach(), true_distance_travelled.detach(), **tolerance[dtype]
)
(true_distance_travelled + dist_ab_t0mt1).sum().backward()
assert torch.isfinite(a.grad).all()
assert torch.isfinite(b.grad).all()
assert torch.isfinite(manifold.k.grad).all()
def test_expmap_logmap(a, b, manifold, dtype):
    # this test appears to be numerically unstable once a and b end up on opposite sides
bh = manifold.expmap(x=a, u=manifold.logmap(a, b))
tolerance = {torch.float32: dict(rtol=1e-5, atol=5e-5), torch.float64: dict()}
np.testing.assert_allclose(bh.detach(), b.detach(), **tolerance[dtype])
bh.sum().backward()
assert torch.isfinite(a.grad).all()
assert torch.isfinite(b.grad).all()
assert torch.isfinite(manifold.k.grad).all()
def test_expmap0_logmap0(a, manifold, dtype):
    # this test appears to be numerically unstable once a and b end up on opposite sides
v = manifold.logmap0(a)
norm = manifold.norm(torch.zeros_like(v), v, keepdim=True)
dist = manifold.dist0(a, keepdim=True)
bh = manifold.expmap0(v)
tolerance = {torch.float32: dict(atol=1e-5, rtol=1e-5), torch.float64: dict()}
np.testing.assert_allclose(bh.detach(), a.detach(), **tolerance[dtype])
np.testing.assert_allclose(norm.detach(), dist.detach(), **tolerance[dtype])
(bh.sum() + dist.sum()).backward()
assert torch.isfinite(a.grad).all()
assert torch.isfinite(manifold.k.grad).all()
def test_matvec_zeros(a, manifold):
mat = a.new_zeros((3, a.shape[-1]))
z = manifold.mobius_matvec(mat, a)
np.testing.assert_allclose(z.detach(), 0.0)
z.sum().backward()
assert torch.isfinite(a.grad).all()
assert torch.isfinite(manifold.k.grad).all()
def test_matvec_via_equiv_fn_apply(a, negative, manifold, strict, dtype):
mat = a.new(3, a.shape[-1]).normal_()
y = manifold.mobius_fn_apply(lambda x: x @ mat.transpose(-1, -2), a)
y1 = manifold.mobius_matvec(mat, a)
tolerance = {torch.float32: dict(atol=1e-5, rtol=1e-5), torch.float64: dict()}
tolerant_allclose_check(y, y1, strict=strict, **tolerance[dtype])
y.sum().backward()
assert torch.isfinite(a.grad).all()
assert torch.isfinite(manifold.k.grad).all()
def test_mobiusify(a, c, negative, strict, dtype):
mat = a.new(3, a.shape[-1]).normal_()
@stereographic.math.mobiusify
def matvec(x):
return x @ mat.transpose(-1, -2)
y = matvec(a, k=-c)
y1 = stereographic.math.mobius_matvec(mat, a, k=-c)
tolerance = {torch.float32: dict(atol=1e-5, rtol=1e-5), torch.float64: dict()}
tolerant_allclose_check(y, y1, strict=strict, **tolerance[dtype])
y.sum().backward()
assert torch.isfinite(a.grad).all()
assert torch.isfinite(c.grad).all()
def test_matvec_chain_via_equiv_fn_apply(a, negative, manifold, dtype):
mat1 = a.new(a.shape[-1], a.shape[-1]).normal_()
mat2 = a.new(a.shape[-1], a.shape[-1]).normal_()
y = manifold.mobius_fn_apply_chain(
a,
lambda x: x @ mat1.transpose(-1, -2),
lambda x: x @ mat2.transpose(-1, -2),
)
y1 = manifold.mobius_matvec(mat1, a)
y1 = manifold.mobius_matvec(mat2, y1)
tolerance = {torch.float32: dict(atol=1e-5, rtol=1e-5), torch.float64: dict()}
tolerant_allclose_check(y, y1, strict=negative, **tolerance[dtype])
y.sum().backward()
assert torch.isfinite(a.grad).all()
assert torch.isfinite(manifold.k.grad).all()
def test_transp0_preserves_inner_products(a, manifold):
# pointing to the center
v_0 = torch.rand_like(a) + 1e-5
u_0 = torch.rand_like(a) + 1e-5
zero = torch.zeros_like(a)
v_a = manifold.transp0(a, v_0)
u_a = manifold.transp0(a, u_0)
# compute norms
vu_0 = manifold.inner(zero, v_0, u_0, keepdim=True)
vu_a = manifold.inner(a, v_a, u_a, keepdim=True)
np.testing.assert_allclose(vu_a.detach(), vu_0.detach(), atol=1e-6, rtol=1e-6)
(vu_0 + vu_a).sum().backward()
assert torch.isfinite(a.grad).all()
assert torch.isfinite(manifold.k.grad).all()
def test_transp0_is_same_as_usual(a, manifold):
# pointing to the center
v_0 = torch.rand_like(a) + 1e-5
zero = torch.zeros_like(a)
v_a = manifold.transp0(a, v_0)
v_a1 = manifold.transp(zero, a, v_0)
# compute norms
np.testing.assert_allclose(v_a.detach(), v_a1.detach(), atol=1e-6, rtol=1e-6)
(v_a + v_a1).sum().backward()
assert torch.isfinite(a.grad).all()
assert torch.isfinite(manifold.k.grad).all()
def test_transp_a_b(a, b, manifold):
# pointing to the center
v_0 = torch.rand_like(a)
u_0 = torch.rand_like(a)
v_1 = manifold.transp(a, b, v_0)
u_1 = manifold.transp(a, b, u_0)
# compute norms
vu_1 = manifold.inner(b, v_1, u_1, keepdim=True)
vu_0 = manifold.inner(a, v_0, u_0, keepdim=True)
np.testing.assert_allclose(vu_0.detach(), vu_1.detach(), atol=1e-6, rtol=1e-6)
(vu_0 + vu_1).sum().backward()
assert torch.isfinite(a.grad).all()
assert torch.isfinite(b.grad).all()
assert torch.isfinite(manifold.k.grad).all()
def test_add_infinity_and_beyond(a, b, c, negative, manifold, dtype):
_a = a
if torch.isclose(c, c.new_zeros(())).any():
pytest.skip("zero not checked")
infty = b * 10000000
for i in range(100):
z = manifold.expmap(a, infty, project=False)
z = manifold.projx(z)
assert not torch.isnan(z).any(), ("Found nans", i, z)
assert torch.isfinite(z).all(), ("Found Infs", i, z)
z = manifold.mobius_scalar_mul(
torch.tensor(1000.0, dtype=z.dtype), z, project=False
)
z = manifold.projx(z)
assert not torch.isnan(z).any(), ("Found nans", i, z)
assert torch.isfinite(z).all(), ("Found Infs", i, z)
infty = manifold.transp(a, z, infty)
assert torch.isfinite(infty).all(), (i, infty)
a = z
z = manifold.expmap(a, -infty)
    # they just need to be very far apart; an exact answer is not expected
tolerance = {
torch.float32: dict(rtol=3e-1, atol=2e-1),
torch.float64: dict(rtol=1e-1, atol=1e-3),
}
if negative:
np.testing.assert_allclose(z.detach(), -a.detach(), **tolerance[dtype])
else:
assert not torch.isnan(z).any(), "Found nans"
assert not torch.isnan(a).any(), "Found nans"
def test_mobius_coadd(a, b, negative, manifold, strict):
# (a \boxplus_c b) \ominus_c b = a
ah = manifold.mobius_sub(manifold.mobius_coadd(a, b), b)
tolerant_allclose_check(a, ah, strict=strict, atol=5e-5)
ah.sum().backward()
assert torch.isfinite(a.grad).all()
assert torch.isfinite(b.grad).all()
assert torch.isfinite(manifold.k.grad).all()
def test_mobius_cosub(a, b, negative, manifold, strict):
    # (a \oplus_c b) \boxminus_c b = a
ah = manifold.mobius_cosub(manifold.mobius_add(a, b), b)
tolerant_allclose_check(a, ah, strict=strict, atol=1e-5)
ah.sum().backward()
assert torch.isfinite(a.grad).all()
assert torch.isfinite(b.grad).all()
assert torch.isfinite(manifold.k.grad).all()
def test_distance2plane(a, manifold):
v = torch.rand_like(a).requires_grad_()
vr = v / manifold.norm(a, v, keepdim=True)
z = manifold.expmap(a, vr)
dist1 = manifold.dist(a, z)
dist = manifold.dist2plane(z, a, vr)
np.testing.assert_allclose(dist.detach(), dist1.detach(), atol=2e-4, rtol=1e-4)
(dist + dist1).sum().backward()
assert torch.isfinite(a.grad).all()
assert torch.isfinite(v.grad).all()
assert torch.isfinite(manifold.k.grad).all()
def test_sproj(manifold, a):
ma = manifold.sproj(manifold.inv_sproj(a))
np.testing.assert_allclose(ma.detach(), a.detach(), atol=1e-5)
ma.sum().backward()
assert torch.isfinite(a.grad).all()
assert torch.isfinite(manifold.k.grad).all()
def test_antipode(manifold, negative, a, dtype, seed):
if seed == 39:
pytest.skip("This is amazingly unstable when tested against extreme values")
ma = manifold.antipode(a)
if manifold.k.le(0).all():
np.testing.assert_allclose(ma.detach(), -a.detach())
else:
s = manifold.inv_sproj(a)
ms = manifold.inv_sproj(ma)
tolerance = {torch.float32: dict(atol=1e-5), torch.float64: dict(atol=1e-6)}
np.testing.assert_allclose(ms.detach(), -s.detach(), **tolerance[dtype])
ma.sum().backward()
assert torch.isfinite(a.grad).all()
assert torch.isfinite(manifold.k.grad).all()
@pytest.mark.parametrize("_k,lincomb", itertools.product([-1, 0, 1], [True, False]))
def test_weighted_midpoint(_k, lincomb):
manifold = stereographic.Stereographic(_k, learnable=True)
a = geoopt.ManifoldParameter(manifold.random(2, 3, 10))
mid = manifold.weighted_midpoint(a, lincomb=lincomb)
assert torch.isfinite(mid).all()
assert mid.shape == (a.shape[-1],)
mid.sum().backward()
assert torch.isfinite(a.grad).all()
assert not torch.isclose(manifold.k.grad, manifold.k.new_zeros(()))
@pytest.mark.parametrize("_k,lincomb", itertools.product([-1, 0, 1], [True, False]))
def test_weighted_midpoint_reduce_dim(_k, lincomb):
manifold = stereographic.Stereographic(_k, learnable=True)
a = geoopt.ManifoldParameter(manifold.random(2, 3, 10))
mid = manifold.weighted_midpoint(a, reducedim=[0], lincomb=lincomb)
assert mid.shape == a.shape[-2:]
assert torch.isfinite(mid).all()
mid.sum().backward()
assert torch.isfinite(a.grad).all()
assert not torch.isclose(manifold.k.grad, manifold.k.new_zeros(()))
@pytest.mark.parametrize("_k,lincomb", itertools.product([-1, 0, 1], [True, False]))
def test_weighted_midpoint_weighted(_k, lincomb):
manifold = stereographic.Stereographic(_k, learnable=True)
a = geoopt.ManifoldParameter(manifold.random(2, 3, 10))
mid = manifold.weighted_midpoint(
a, reducedim=[0], lincomb=lincomb, weights=torch.rand_like(a[..., 0])
)
assert mid.shape == a.shape[-2:]
assert torch.isfinite(mid).all()
mid.sum().backward()
assert torch.isfinite(a.grad).all()
assert not torch.isclose(manifold.k.grad, manifold.k.new_zeros(()))
@pytest.mark.parametrize("_k,lincomb", itertools.product([-1, 0, 1], [True, False]))
def test_weighted_midpoint_zero(_k, lincomb):
manifold = stereographic.Stereographic(_k, learnable=True)
a = geoopt.ManifoldParameter(manifold.random(2, 3, 10))
mid = manifold.weighted_midpoint(
a, reducedim=[0], lincomb=lincomb, weights=torch.zeros_like(a[..., 0])
)
assert mid.shape == a.shape[-2:]
assert torch.allclose(mid, torch.zeros_like(mid))
mid.sum().backward()
assert torch.isfinite(a.grad).all()
assert torch.isfinite(manifold.k.grad).all()
@pytest.mark.parametrize("lincomb", [True, False])
def test_weighted_midpoint_euclidean(lincomb):
manifold = stereographic.Stereographic(0)
a = geoopt.ManifoldParameter(manifold.random(2, 3, 10))
mid = manifold.weighted_midpoint(a, reducedim=[0], lincomb=lincomb)
assert mid.shape == a.shape[-2:]
if lincomb:
assert torch.allclose(mid, a.sum(0))
else:
assert torch.allclose(mid, a.mean(0))
@pytest.mark.parametrize("_k,lincomb", itertools.product([-1, 0, 1], [True, False]))
def test_weighted_midpoint_weighted_zero_sum(_k, lincomb):
manifold = stereographic.Stereographic(_k, learnable=True)
a = geoopt.ManifoldParameter(
manifold.expmap0(torch.eye(3, 10)).detach(), manifold=manifold
)
weights = torch.rand_like(a[..., 0])
weights = weights - weights.sum() / weights.numel()
mid = manifold.weighted_midpoint(
a, lincomb=lincomb, weights=weights, posweight=True
)
if _k == 0 and lincomb:
np.testing.assert_allclose(
mid.detach(),
torch.cat([weights, torch.zeros(a.size(-1) - a.size(0))]),
atol=1e-6,
)
assert mid.shape == a.shape[-1:]
assert torch.isfinite(mid).all()
mid.sum().backward()
assert torch.isfinite(a.grad).all()
| [
"torch.zeros",
"torch.rand",
"torch.rand_like",
"torch.arange",
"torch.isnan",
"torch.linspace",
"torch.isfinite",
"torch.ones",
"torch.manual_seed",
"torch.tensor",
"torch.eye",
"torch.zeros_like",
"torch.as_tensor",
"torch.empty"
] | 1.9.0 | leonMatzner/geoopt | 4a7058e43bf78ab5012b862076a74bec175df221 |
1.5 | '''
source:https://github.com/WolffyChen/PytorchToCaffe/blob/master/pytorch_to_caffe.py
'''
import torch
import torch.nn as nn
import traceback
from Caffe import caffe_net
import torch.nn.functional as F
from torch.autograd import Variable
from Caffe import layer_param
from torch.nn.modules.utils import _pair
import numpy as np
"""
How to support a new layer type:
layer_name=log.add_layer(layer_type_name)
top_blobs=log.add_blobs(<output of that layer>)
layer=caffe_net.Layer_param(xxx)
<set layer parameters>
[<layer.add_data(*datas)>]
log.cnet.add_layer(layer)
Please avoid (mute) in-place operations, otherwise blobs cannot be found in the graph.
Note: only functions from torch.nn.functional can be converted into Caffe layers.
"""
# TODO: support the inplace output of the layers
class Blob_LOG():
def __init__(self):
self.data = {}
def __setitem__(self, key, value):
self.data[key] = value
def __getitem__(self, key):
return self.data[key]
def __len__(self):
return len(self.data)
NET_INITTED = False
# How the conversion works: by recording every layer and blob while the network runs forward
class TransLog(object):
def __init__(self):
"""
        Call init() with the input Variables before using this object.
"""
self.layers = {}
self.detail_layers = {}
self.detail_blobs = {}
self._blobs = Blob_LOG()
self._blobs_data = []
self.cnet = caffe_net.Caffemodel('')
self.debug = True
def init(self, inputs):
"""
:param inputs: is a list of input variables
"""
self.layers['data'] = 'data'
self.add_blobs(inputs, 'data', False)
def add_layer(self, name='layer'):
if name in self.layers:
return self.layers[name]
if name not in self.detail_layers.keys():
self.detail_layers[name] = 0
self.detail_layers[name] += 1
name = '{}{}'.format(name,self.detail_layers[name])
self.layers[name] = name
if self.debug:
print('{} was added to layers'.format(self.layers[name]))
return self.layers[name]
def add_blobs(self, blobs, name='blob', with_num=True):
rst=[]
for blob in blobs:
            self._blobs_data.append(blob)  # keep a reference so the tensor's memory address is not reused
blob_id=int(id(blob))
if name not in self.detail_blobs.keys():
self.detail_blobs[name] = 0
self.detail_blobs[name] += 1
if with_num:
rst.append('{}{}'.format(name, self.detail_blobs[name]))
else:
rst.append('{}'.format(name))
if self.debug:
print("{}:{} was added to blobs".format(blob_id, rst[-1]))
#print('Add blob {} : {}'.format(rst[-1].center(21),blob.size()))
self._blobs[blob_id] = rst[-1]
return rst
def blobs(self, var):
var = id(var)
#if self.debug:
# print("{}:{} getting".format(var, self._blobs[var]))
try:
return self._blobs[var]
except:
print("WARNING: CANNOT FOUND blob {}".format(var))
return None
def inplace_flag(self, name='layer'):
key_list = ['add', 'sub', 'mul']
for kl in key_list:
if kl in name:
return False
key_num = 3
vl = list(self.layers.values())
if vl.count(name) >= key_num:
return False
return True
log = TransLog()
layer_names = {}
def _conv2d(raw,input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
print('conv: ', log.blobs(input))
x = raw(input, weight, bias, stride, padding, dilation, groups)
name = log.add_layer(name='conv')
log.add_blobs([x], name='conv')
layer = caffe_net.Layer_param(name=name, type='Convolution', bottom=[log.layers[log.blobs(input)]], top=[log.blobs(x)])
layer.conv_param(x.size()[1], weight.size()[2:], stride=_pair(stride),
pad=_pair(padding), dilation=_pair(dilation), bias_term=bias is not None, groups=groups)
if bias is not None:
layer.add_data(weight.cpu().data.numpy(), bias.cpu().data.numpy())
else:
layer.param.convolution_param.bias_term = False
layer.add_data(weight.cpu().data.numpy())
log.cnet.add_layer(layer)
return x
def _conv_transpose2d(raw,input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1):
x = raw(input, weight, bias, stride, padding, output_padding, groups, dilation)
name = log.add_layer(name='conv_transpose')
log.add_blobs([x], name='conv_transpose')
layer = caffe_net.Layer_param(name=name, type='Deconvolution', bottom=[log.layers[log.blobs(input)]], top=[log.blobs(x)])
layer.conv_param(x.size()[1], weight.size()[2:], stride=_pair(stride),
pad=_pair(padding), dilation=_pair(dilation), bias_term=bias is not None, groups=groups)
if bias is not None:
layer.add_data(weight.cpu().data.numpy(), bias.cpu().data.numpy())
else:
layer.param.convolution_param.bias_term = False
layer.add_data(weight.cpu().data.numpy())
log.cnet.add_layer(layer)
return x
def _linear(raw,input, weight, bias=None):
x = raw(input,weight,bias)
layer_name = log.add_layer(name='fc')
top_blobs = log.add_blobs([x],name='fc')
layer = caffe_net.Layer_param(name=layer_name, type='InnerProduct', bottom=[log.layers[log.blobs(input)]], top=top_blobs)
layer.fc_param(x.size()[1], has_bias=bias is not None)
if bias is not None:
layer.add_data(weight.cpu().data.numpy(), bias.cpu().data.numpy())
else:
layer.add_data(weight.cpu().data.numpy())
log.cnet.add_layer(layer)
return x
def _split(raw,input, split_size, dim=0):
# split in pytorch is slice in caffe
x = raw(input, split_size, dim)
layer_name = log.add_layer('split')
top_blobs = log.add_blobs([x], name='split')
layer = caffe_net.Layer_param(name=layer_name, type='Slice', bottom=[log.layers[log.blobs(input)]], top=top_blobs)
slice_num = int(np.floor(input.size()[dim] / split_size))
slice_param = caffe_net.pb.SliceParameter(axis=dim, slice_point=[split_size * i for i in range(1, slice_num)])
layer.param.slice_param.CopyFrom(slice_param)
log.cnet.add_layer(layer)
return x
def _pool(type,raw,input, x, kernel_size, stride, padding, ceil_mode):
# TODO dilation,ceil_mode,return indices
layer_name = log.add_layer(name='{}_pool'.format(type))
top_blobs = log.add_blobs([x], name='{}_pool'.format(type))
layer = caffe_net.Layer_param(name=layer_name, type='Pooling', bottom=[log.layers[log.blobs(input)]], top=top_blobs)
# TODO w,h different kernel, stride and padding
# processing ceil mode
layer.pool_param(kernel_size=kernel_size, stride=kernel_size if stride is None else stride,
pad=padding, type=type.upper(), ceil_mode=ceil_mode)
log.cnet.add_layer(layer)
if ceil_mode == False and stride is not None:
oheight = (input.size()[2] - _pair(kernel_size)[0] + 2 * _pair(padding)[0]) % (_pair(stride)[0])
owidth = (input.size()[3] - _pair(kernel_size)[1] + 2 * _pair(padding)[1]) % (_pair(stride)[1])
if oheight != 0 or owidth != 0:
caffe_out = raw(input, kernel_size, stride, padding, ceil_mode=True)
print("WARNING: the output shape miss match at {}: "
"input {} output---Pytorch:{}---Caffe:{}\n"
"This is caused by the different implementation that ceil mode in caffe and the floor mode in pytorch.\n"
"You can add the clip layer in caffe prototxt manually if shape mismatch error is caused in caffe. ".format(layer_name, input.size(), x.size(), caffe_out.size()))
def _max_pool2d(raw,input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False, return_indices=False):
x = raw(input, kernel_size, stride, padding, dilation,ceil_mode, return_indices)
_pool('max',raw,input, x, kernel_size, stride, padding,ceil_mode)
return x
def _avg_pool2d(raw,input, kernel_size, stride = None, padding = 0, ceil_mode = False, count_include_pad=True):
x = raw(input, kernel_size, stride, padding, ceil_mode, count_include_pad)
_pool('ave',raw,input, x, kernel_size, stride, padding,ceil_mode)
return x
def _adaptive_pool(type,raw,input, x, kernel_size, stride):
layer_name = log.add_layer(name='{}_pool'.format(type))
top_blobs = log.add_blobs([x], name='{}_pool'.format(type))
layer = caffe_net.Layer_param(name=layer_name, type='Pooling', bottom=[log.layers[log.blobs(input)]], top=top_blobs)
# TODO w,h different kernel, stride and padding
# processing ceil mode
layer.pool_param(kernel_size=kernel_size, stride=kernel_size if stride is None else stride, pad=0, type=type.upper(), ceil_mode='ceil')
log.cnet.add_layer(layer)
def _adaptive_max_pool2d(raw,input, output_size=(1, 1)):
x = raw(input, output_size)
_adaptive_pool('max',raw,input, x, input.size(2), 1)
return x
def _adaptive_avg_pool2d(raw,input, output_size=(1, 1)):
x = raw(input, output_size)
_adaptive_pool('ave',raw,input, x, input.size(2), 1)
return x
def _flatten(raw,*args):
x = raw(*args)
if len(args) == 1:
# TODO
assert NotImplementedError
else:
layer_name = log.add_layer(name='flatten')
top_blobs = log.add_blobs([x],name='flatten')
layer = caffe_net.Layer_param(name=layer_name, type='Reshape', bottom=[log.layers[log.blobs(args[0])]], top=top_blobs)
dims = list([0, 1])
dims[0] = 0 # the first dim should be batch_size
for s in x.size()[1:]:
dims[1] *= s
layer.param.reshape_param.shape.CopyFrom(caffe_net.pb.BlobShape(dim=dims))
log.cnet.add_layer(layer)
return x
def _max(raw,*args):
x = raw(*args)
if len(args) == 1:
# TODO max in one tensor
assert NotImplementedError
else:
bottom_blobs = []
for arg in args:
bottom_blobs.append(log.layers[log.blobs(arg)])
layer_name = log.add_layer(name='max')
top_blobs = log.add_blobs([x], name='max')
layer = caffe_net.Layer_param(name=layer_name, type='Eltwise', bottom=bottom_blobs, top=top_blobs)
layer.param.eltwise_param.operation = 2
log.cnet.add_layer(layer)
return x
def _cat(raw,inputs, dimension=0):
x = raw(inputs, dimension)
bottom_blobs = []
for input in inputs:
bottom_blobs.append(log.layers[log.blobs(input)])
layer_name = log.add_layer(name='cat')
top_blobs = log.add_blobs([x], name='cat')
layer = caffe_net.Layer_param(name=layer_name, type='Concat', bottom=bottom_blobs, top=top_blobs)
layer.param.concat_param.axis = dimension
log.cnet.add_layer(layer)
return x
def _dropout(raw,input, p=0.5, training=False, inplace=False):
x = raw(input, p, training)
bottom_blobs = [log.layers[log.blobs(input)]]
layer_name = log.add_layer(name='dropout')
top_blobs = log.add_blobs([x], name='dropout')
layer = caffe_net.Layer_param(name=layer_name, type='Dropout', bottom=bottom_blobs, top=bottom_blobs)
layer.param.dropout_param.dropout_ratio = p
layer.param.include.extend([caffe_net.pb.NetStateRule(phase=1)]) # 1 for test, 0 for train
log.cnet.add_layer(layer)
log.layers[log.blobs(x)] = log.layers[log.blobs(input)]
return x
def _threshold(raw,input, threshold, value, inplace=False):
# for threshold or relu
if threshold == 0 and value == 0:
x = raw(input, threshold, value)
bottom_blobs = [log.layers[log.blobs(input)]]
name = log.add_layer(name='relu')
log.add_blobs([x], name='relu')
layer = caffe_net.Layer_param(name=name, type='ReLU', bottom=bottom_blobs, top=bottom_blobs)
log.cnet.add_layer(layer)
log.layers[log.blobs(x)] = log.layers[log.blobs(input)]
return x
if value!=0:
        raise NotImplementedError("value != 0 not implemented in caffe")
x = raw(input, threshold, value, inplace)
bottom_blobs = [log.layers[log.blobs(input)]]
layer_name = log.add_layer(name='threshold')
top_blobs = log.add_blobs([x], name='threshold')
layer = caffe_net.Layer_param(name=layer_name, type='Threshold', bottom=bottom_blobs, top=bottom_blobs)
layer.param.threshold_param.threshold = threshold
log.cnet.add_layer(layer)
log.layers[log.blobs(x)] = log.layers[log.blobs(input)]
return x
def _relu(raw,input, inplace=False):
    # for relu
x = raw(input)
name = log.add_layer(name='relu')
log.add_blobs([x], name='relu')
layer = caffe_net.Layer_param(name=name, type='ReLU', bottom=[log.layers[log.blobs(input)]], top=[log.layers[log.blobs(input)]])
log.cnet.add_layer(layer)
log.layers[log.blobs(x)] = log.layers[log.blobs(input)]
return x
def _prelu(raw,input, weight):
# for threshold or prelu
x = raw(input, weight)
bottom_blobs = [log.layers[log.blobs(input)]]
name = log.add_layer(name='prelu')
log.add_blobs([x], name='prelu')
layer = caffe_net.Layer_param(name=name, type='PReLU', bottom=bottom_blobs, top=bottom_blobs)
if weight.size()[0] == 1:
layer.param.prelu_param.channel_shared = True
layer.add_data(weight.cpu().data.numpy()[0])
else:
layer.add_data(weight.cpu().data.numpy())
log.cnet.add_layer(layer)
log.layers[log.blobs(x)] = log.layers[log.blobs(input)]
return x
def _leaky_relu(raw,input, negative_slope=0.01, inplace=False):
x = raw(input, negative_slope)
name = log.add_layer(name='leaky_relu')
log.add_blobs([x], name='leaky_relu')
layer = caffe_net.Layer_param(name=name, type='ReLU', bottom=[log.layers[log.blobs(input)]], top=[log.layers[log.blobs(input)]])
layer.param.relu_param.negative_slope = negative_slope
log.cnet.add_layer(layer)
log.layers[log.blobs(x)] = log.layers[log.blobs(input)]
return x
def _tanh(raw,input):
# for tanh activation
x = raw(input)
name = log.add_layer(name='tanh')
log.add_blobs([x], name='tanh')
layer = caffe_net.Layer_param(name=name, type='TanH', bottom=[log.layers[log.blobs(input)]], top=[log.layers[log.blobs(input)]])
log.cnet.add_layer(layer)
log.layers[log.blobs(x)] = log.layers[log.blobs(input)]
return x
def _softmax(raw,input, dim=None, _stacklevel=3):
# for F.softmax
x = raw(input, dim=dim)
if dim is None:
dim = F._get_softmax_dim('softmax', input.dim(), _stacklevel)
bottom_blobs = [log.layers[log.blobs(input)]]
name = log.add_layer(name='softmax')
log.add_blobs([x], name='softmax')
layer = caffe_net.Layer_param(name=name, type='Softmax', bottom=bottom_blobs, top=[log.blobs(x)])
layer.param.softmax_param.axis = dim
log.cnet.add_layer(layer)
return x
def _batch_norm(raw,input, running_mean, running_var, weight=None, bias=None, training=False, momentum=0.1, eps=1e-5):
    # because the running_mean and running_var will be changed by the _batch_norm operation, we first save the parameters
x = raw(input, running_mean, running_var, weight, bias, training, momentum, eps)
bottom_blobs = [log.layers[log.blobs(input)]]
layer_name1 = log.add_layer(name='batch_norm')
top_blobs = log.add_blobs([x], name='batch_norm')
if log.inplace_flag(log.layers[log.blobs(input)]):
top_blobs = bottom_blobs
layer1 = caffe_net.Layer_param(name=layer_name1, type='BatchNorm', bottom=bottom_blobs, top=top_blobs)
if running_mean is None or running_var is None:
# not use global_stats, normalization is performed over the current mini-batch
layer1.batch_norm_param(use_global_stats=0, eps=eps)
else:
layer1.batch_norm_param(use_global_stats=1, eps=eps)
running_mean_clone = running_mean.clone()
running_var_clone = running_var.clone()
layer1.add_data(running_mean_clone.cpu().numpy(), running_var_clone.cpu().numpy(), np.array([1.0]))
log.cnet.add_layer(layer1)
if weight is not None and bias is not None:
layer_name2 = log.add_layer(name='bn_scale')
layer2 = caffe_net.Layer_param(name=layer_name2, type='Scale', bottom=top_blobs, top=top_blobs)
layer2.param.scale_param.bias_term = True
layer2.add_data(weight.cpu().data.numpy(), bias.cpu().data.numpy())
log.cnet.add_layer(layer2)
if log.inplace_flag(log.layers[log.blobs(input)]):
log.layers[layer_name2] = log.layers[log.blobs(input)]
else:
log.layers[layer_name2] = log.layers[log.blobs(x)]
if log.inplace_flag(log.layers[log.blobs(input)]):
log.layers[log.blobs(x)] = log.layers[log.blobs(input)]
return x
def _instance_norm(raw,input, running_mean=None, running_var=None, weight=None, bias=None, use_input_stats=True, momentum=0.1, eps=1e-5):
# TODO: the batch size!=1 view operations
print("WARNING: The Instance Normalization transfers to Caffe using BatchNorm, so the batch size should be 1")
if running_var is not None or weight is not None:
# TODO: the affine=True or track_running_stats=True case
raise NotImplementedError("not implement the affine=True or track_running_stats=True case InstanceNorm")
x= torch.batch_norm(input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps,torch.backends.cudnn.enabled)
bottom_blobs = [log.layers[log.blobs(input)]]
layer_name1 = log.add_layer(name='instance_norm')
top_blobs = log.add_blobs([x], name='instance_norm')
if log.inplace_flag(log.layers[log.blobs(input)]):
top_blobs = bottom_blobs
layer1 = caffe_net.Layer_param(name=layer_name1, type='BatchNorm', bottom=bottom_blobs, top=top_blobs)
if running_mean is None or running_var is None:
# not use global_stats, normalization is performed over the current mini-batch
layer1.batch_norm_param(use_global_stats=0,eps=eps)
running_mean = torch.zeros(input.size()[1])
running_var = torch.ones(input.size()[1])
else:
layer1.batch_norm_param(use_global_stats=1, eps=eps)
running_mean_clone = running_mean.clone()
running_var_clone = running_var.clone()
layer1.add_data(running_mean_clone.cpu().numpy(), running_var_clone.cpu().numpy(), np.array([1.0]))
log.cnet.add_layer(layer1)
if weight is not None and bias is not None:
layer_name2 = log.add_layer(name='bn_scale')
layer2 = caffe_net.Layer_param(name=layer_name2, type='Scale', bottom=top_blobs, top=top_blobs)
layer2.param.scale_param.bias_term = True
layer2.add_data(weight.cpu().data.numpy(), bias.cpu().data.numpy())
log.cnet.add_layer(layer2)
if log.inplace_flag(log.layers[log.blobs(input)]):
log.layers[layer_name2] = log.layers[log.blobs(input)]
else:
log.layers[layer_name2] = log.layers[log.blobs(x)]
if log.inplace_flag(log.layers[log.blobs(input)]):
log.layers[log.blobs(x)] = log.layers[log.blobs(input)]
return x
#upsample layer
def _interpolate(raw,input, size=None, scale_factor=None, mode='nearest', align_corners=None):
    # The available parameters are: `scale`, the output/input size ratio (e.g. 2);
    # `scale_h` / `scale_w`, the same ratio specified separately for the h and w directions;
    # `pad_out_h` / `pad_out_w`, extra output padding in the h and w directions, only
    # meaningful when scale is 2; and `upsample_h` / `upsample_w`, the explicit output size.
    # In the Upsample-related code it is recommended to define the output size only via
    # `upsample_h` / `upsample_w`; the other parameters are discouraged.
# for nearest _interpolate
if mode != "nearest" or align_corners != None:
raise NotImplementedError("not implement F.interpolate totoaly")
x = raw(input,size , scale_factor ,mode)
layer_name = log.add_layer(name='upsample')
    top_blobs = log.add_blobs([x], name='upsample')
layer = caffe_net.Layer_param(name=layer_name, type='Upsample', bottom=[log.layers[log.blobs(input)]], top=top_blobs)
layer.upsample_param(size =(input.size(2),input.size(3)), scale_factor= scale_factor)
log.cnet.add_layer(layer)
return x
#sigmid layer
def _sigmoid(raw,input):
# Applies the element-wise function:
# Sigmoid(x)= 1/(1+exp(−x))
x = raw(input)
name = log.add_layer(name='sigmoid')
log.add_blobs([x], name='sigmoid')
layer = caffe_net.Layer_param(name=name, type='Sigmoid', bottom=[log.layers[log.blobs(input)]], top=[log.layers[log.blobs(input)]])
log.cnet.add_layer(layer)
    log.layers[log.blobs(x)] = log.layers[log.blobs(input)]
    return x
#tanh layer
def _tanh(raw,input):
# Applies the element-wise function:
# torch.nn.Tanh
x = raw(input)
name = log.add_layer(name='tanh')
log.add_blobs([x], name='tanh')
layer = caffe_net.Layer_param(name=name, type='TanH', bottom=[log.layers[log.blobs(input)]], top=[log.layers[log.blobs(input)]])
log.cnet.add_layer(layer)
    log.layers[log.blobs(x)] = log.layers[log.blobs(input)]
    return x
def _hardtanh(raw, input, min_val, max_val, inplace):
# Applies the element-wise function:
# torch.nn.ReLu6
print('relu6: ', log.blobs(input))
x = raw(input, min_val, max_val)
name = log.add_layer(name='relu6')
log.add_blobs([x], name='relu6_blob')
layer = caffe_net.Layer_param(name=name, type='ReLU6', bottom=[log.blobs(input)], top=[log.blobs(x)])
log.cnet.add_layer(layer)
return x
#L2Norm layer
def _l2Norm(raw, input, weight, eps):
# Applies the element-wise function:
# L2Norm in vgg_ssd
x = raw(input, weight, eps)
name = log.add_layer(name='normalize')
log.add_blobs([x], name='normalize_blob')
layer = caffe_net.Layer_param(name=name, type='Normalize', bottom=[log.blobs(input)], top=[log.blobs(x)])
layer.norm_param(eps)
layer.add_data(weight.cpu().data.numpy())
log.cnet.add_layer(layer)
return x
def _div(raw,inputs, inputs2):
x=raw(inputs, inputs2)
log.add_blobs([x],name='div_blob')
return x
# ----- for Variable operations --------
def _view(input,*args):
x = raw_view(input, *args)
if not NET_INITTED:
return x
layer_name = log.add_layer(name='view')
top_blobs = log.add_blobs([x],name='view')
layer = caffe_net.Layer_param(name=layer_name, type='Reshape', bottom=[log.layers[log.blobs(input)]], top=top_blobs)
    # TODO: add reshape to nn_tools layer
dims = list(args)
dims[0] = 0 # the first dim should be batch_size
layer.param.reshape_param.shape.CopyFrom(caffe_net.pb.BlobShape(dim=dims))
log.cnet.add_layer(layer)
return x
def _mean(input,*args,**kwargs):
x = raw_mean(input, *args,**kwargs)
if not NET_INITTED:
return x
layer_name = log.add_layer(name='mean')
top_blobs = log.add_blobs([x],name='mean')
layer = caffe_net.Layer_param(name=layer_name, type='Reduction', bottom=[log.layers[log.blobs(input)]], top=top_blobs)
if len(args)==1:
dim = args[0]
elif 'dim' in kwargs:
dim = kwargs['dim']
else:
raise NotImplementedError('mean operation must specify a dim')
layer.param.reduction_param.operation = 4
layer.param.reduction_param.axis = dim
log.cnet.add_layer(layer)
return x
def _add(input,*args):
x = raw__add__(input, *args)
if not NET_INITTED:
return x
layer_name = log.add_layer(name='add')
top_blobs = log.add_blobs([x], name='add')
if log.blobs(args[0]) == None:
log.add_blobs([args[0]], name='extra')
else:
layer = caffe_net.Layer_param(name=layer_name, type='Eltwise', bottom=[log.layers[log.blobs(input)],log.layers[log.blobs(args[0])]], top=top_blobs)
layer.param.eltwise_param.operation = 1 # sum is 1
log.cnet.add_layer(layer)
return x
def _iadd(input,*args):
x = raw__iadd__(input, *args)
if not NET_INITTED:
return x
x = x.clone()
layer_name = log.add_layer(name='add')
top_blobs = log.add_blobs([x], name='add')
layer = caffe_net.Layer_param(name=layer_name, type='Eltwise', bottom=[log.layers[log.blobs(input)],log.layers[log.blobs(args[0])]], top=top_blobs)
layer.param.eltwise_param.operation = 1 # sum is 1
log.cnet.add_layer(layer)
return x
def _sub(input,*args):
x = raw__sub__(input, *args)
if not NET_INITTED:
return x
layer_name = log.add_layer(name='sub')
top_blobs = log.add_blobs([x], name='sub')
layer = caffe_net.Layer_param(name=layer_name, type='Eltwise', bottom=[log.layers[log.blobs(input)],log.layers[log.blobs(args[0])]], top=top_blobs)
layer.param.eltwise_param.operation = 1 # sum is 1
layer.param.eltwise_param.coeff.extend([1.,-1.])
log.cnet.add_layer(layer)
return x
def _isub(input,*args):
x = raw__isub__(input, *args)
if not NET_INITTED:
return x
x = x.clone()
layer_name = log.add_layer(name='sub')
top_blobs = log.add_blobs([x], name='sub')
layer = caffe_net.Layer_param(name=layer_name, type='Eltwise', bottom=[log.layers[log.blobs(input)],log.layers[log.blobs(args[0])]], top=top_blobs)
layer.param.eltwise_param.operation = 1 # sum is 1
log.cnet.add_layer(layer)
return x
def _mul(input,*args):
x = raw__mul__(input, *args)
if not NET_INITTED:
return x
layer_name = log.add_layer(name='mul')
top_blobs = log.add_blobs([x], name='mul')
layer = caffe_net.Layer_param(name=layer_name, type='Eltwise', bottom=[log.layers[log.blobs(input)], log.layers[log.blobs(args[0])]], top=top_blobs)
    layer.param.eltwise_param.operation = 0  # product is 0
log.cnet.add_layer(layer)
return x
def _imul(input,*args):
x = raw__imul__(input, *args)
if not NET_INITTED:
return x
x = x.clone()
layer_name = log.add_layer(name='mul')
top_blobs = log.add_blobs([x], name='mul')
layer = caffe_net.Layer_param(name=layer_name, type='Eltwise', bottom=[log.layers[log.blobs(input)], log.layers[log.blobs(args[0])]], top=top_blobs)
    layer.param.eltwise_param.operation = 0  # product is 0
layer.param.eltwise_param.coeff.extend([1., -1.])
log.cnet.add_layer(layer)
return x
#Permute layer
def _permute(input,*args):
x = raw__permute__(input, *args)
name = log.add_layer(name='permute')
log.add_blobs([x], name='permute')
layer = caffe_net.Layer_param(name=name, type='Permute', bottom=[log.blobs(input)], top=[log.blobs(x)])
order1 = args[0]
order2 = args[1]
order3 = args[2]
order4 = args[3]
layer.permute_param(order1, order2, order3, order4)
log.cnet.add_layer(layer)
return x
#contiguous
def _contiguous(input,*args):
x = raw__contiguous__(input, *args)
name = log.add_layer(name='contiguous')
log.add_blobs([x], name='contiguous')
layer = caffe_net.Layer_param(name=name, type='NeedRemove', bottom=[log.blobs(input)], top=[log.blobs(x)])
log.cnet.add_layer(layer)
return x
#pow
def _pow(input,*args):
x = raw__pow__(input, *args)
log.add_blobs([x], name='pow')
return x
#sum
def _sum(input,*args):
x = raw__sum__(input, *args)
log.add_blobs([x], name='sum')
return x
# sqrt
def _sqrt(input,*args):
x = raw__sqrt__(input, *args)
log.add_blobs([x], name='sqrt')
return x
# unsqueeze
def _unsqueeze(input,*args):
x = raw__unsqueeze__(input, *args)
log.add_blobs([x], name='unsqueeze')
return x
# sqrt
def _expand_as(input,*args):
x = raw__expand_as__(input, *args)
log.add_blobs([x], name='expand_as')
return x
# Core component: this class wraps the operators in torch's functional API so that their inputs, outputs and parameters can be recorded during the forward pass
class Rp(object):
def __init__(self, raw, replace, **kwargs):
# replace the raw function to replace function
self.obj = replace
self.raw = raw
def __call__(self, *args, **kwargs):
if not NET_INITTED:
return self.raw(*args, **kwargs)
for stack in traceback.walk_stack(None):
if 'self' in stack[0].f_locals:
layer = stack[0].f_locals['self']
if layer in layer_names:
log.pytorch_layer_name = layer_names[layer]
print(layer_names[layer])
break
out = self.obj(self.raw,*args,**kwargs)
# if isinstance(out,Variable):
# out = [out]
return out
F.conv2d = Rp(F.conv2d, _conv2d)
F.linear = Rp(F.linear, _linear)
F.relu = Rp(F.relu, _relu)
F.leaky_relu = Rp(F.leaky_relu, _leaky_relu)
F.max_pool2d = Rp(F.max_pool2d, _max_pool2d)
F.avg_pool2d = Rp(F.avg_pool2d, _avg_pool2d)
F.dropout = Rp(F.dropout, _dropout)
F.threshold = Rp(F.threshold, _threshold)
F.prelu = Rp(F.prelu, _prelu)
F.batch_norm = Rp(F.batch_norm, _batch_norm)
F.instance_norm = Rp(F.instance_norm, _instance_norm)
F.softmax = Rp(F.softmax, _softmax)
F.conv_transpose2d = Rp(F.conv_transpose2d, _conv_transpose2d)
F.interpolate = Rp(F.interpolate, _interpolate)
F.sigmoid = Rp(F.sigmoid, _sigmoid)
F.tanh = Rp(F.tanh, _tanh)
F.hardtanh = Rp(F.hardtanh, _hardtanh)
# F.l2norm = Rp(F.l2norm, _l2Norm)
F.adaptive_max_pool2d = Rp(F.adaptive_max_pool2d, _adaptive_max_pool2d)
F.adaptive_avg_pool2d = Rp(F.adaptive_avg_pool2d, _adaptive_avg_pool2d)
torch.split = Rp(torch.split, _split)
torch.max = Rp(torch.max, _max)
torch.cat = Rp(torch.cat, _cat)
torch.div = Rp(torch.div, _div)
torch.flatten = Rp(torch.flatten, _flatten)
# TODO: other types of the view function
try:
raw_view = Variable.view
Variable.view = _view
raw_mean = Variable.mean
Variable.mean = _mean
raw__add__ = Variable.__add__
Variable.__add__ = _add
raw__iadd__ = Variable.__iadd__
Variable.__iadd__ = _iadd
raw__sub__ = Variable.__sub__
Variable.__sub__ = _sub
raw__isub__ = Variable.__isub__
Variable.__isub__ = _isub
raw__mul__ = Variable.__mul__
Variable.__mul__ = _mul
raw__imul__ = Variable.__imul__
Variable.__imul__ = _imul
except:
# for new version 0.4.0 and later version
for t in [torch.Tensor]:
raw_view = t.view
t.view = _view
raw_mean = t.mean
t.mean = _mean
raw__add__ = t.__add__
t.__add__ = _add
raw__iadd__ = t.__iadd__
t.__iadd__ = _iadd
raw__sub__ = t.__sub__
t.__sub__ = _sub
raw__isub__ = t.__isub__
t.__isub__ = _isub
raw__mul__ = t.__mul__
t.__mul__=_mul
raw__imul__ = t.__imul__
t.__imul__ = _imul
raw__permute__ = t.permute
t.permute = _permute
raw__contiguous__ = t.contiguous
t.contiguous = _contiguous
raw__pow__ = t.pow
t.pow = _pow
raw__sum__ = t.sum
t.sum = _sum
raw__sqrt__ = t.sqrt
t.sqrt = _sqrt
raw__unsqueeze__ = t.unsqueeze
t.unsqueeze = _unsqueeze
raw__expand_as__ = t.expand_as
t.expand_as = _expand_as
def trans_net(net,input_var, name='TransferedPytorchModel'):
print('Starting Transform, This will take a while')
log.init([input_var])
log.cnet.net.name = name
log.cnet.net.input.extend([log.blobs(input_var)])
log.cnet.net.input_dim.extend(input_var.size())
# layer = caffe_net.Layer_param(name='data', type='Input', top=['data'])
# layer.input_param(input_var.data.numpy().shape)
# log.cnet.add_layer(layer)
global NET_INITTED
NET_INITTED = True
for name,layer in net.named_modules():
layer_names[layer] = name
print("torch ops name:", layer_names)
out = net.forward(input_var)
print('Transform Completed')
for key in log.layers:
print('{} {}'.format(key, log.layers[key]))
def save_prototxt(save_name):
# log.cnet.remove_layer_by_type("NeedRemove")
log.cnet.save_prototxt(save_name)
def save_caffemodel(save_name):
log.cnet.save(save_name) | [
"torch.batch_norm",
"torch.nn.modules.utils._pair"
] | 1.5.1 | pandamax/carrier-of-tricks-for-classification-pytorch | 283a9f644b43d4800217bd10c1ab2accf1a787c6 |
1.5 | import torch
from torch.autograd import Function
from torch import nn
from .alias_multinomial import AliasMethod
import math
class NCEFunction(Function):
@staticmethod
def forward(self, x, y, memory, idx, params):
K = int(params[0].item())
T = params[1].item()
Z = params[2].item()
momentum = params[3].item()
batchSize = x.size(0)
outputSize = memory.size(0)
inputSize = memory.size(1)
# sample positives & negatives
idx.select(1,0).copy_(y.data)
        # sample corresponding weights
weight = torch.index_select(memory, 0, idx.view(-1))
weight.resize_(batchSize, K+1, inputSize)
# inner product
out = torch.bmm(weight, x.data.resize_(batchSize, inputSize, 1))
out.div_(T).exp_() # batchSize * self.K+1
x.data.resize_(batchSize, inputSize)
if Z < 0:
params[2] = out.mean() * outputSize
Z = params[2].item()
print("normalization constant Z is set to {:.1f}".format(Z))
out.div_(Z).resize_(batchSize, K+1)
self.save_for_backward(x, memory, y, weight, out, params)
return out
@staticmethod
def backward(self, gradOutput):
x, memory, y, weight, out, params = self.saved_tensors
K = int(params[0].item())
T = params[1].item()
Z = params[2].item()
momentum = params[3].item()
batchSize = gradOutput.size(0)
# gradients d Pm / d linear = exp(linear) / Z
gradOutput.data.mul_(out.data)
# add temperature
gradOutput.data.div_(T)
gradOutput.data.resize_(batchSize, 1, K+1)
# gradient of linear
gradInput = torch.bmm(gradOutput.data, weight)
gradInput.resize_as_(x)
# update the non-parametric data
weight_pos = weight.select(1, 0).resize_as_(x)
weight_pos.mul_(momentum)
weight_pos.add_(torch.mul(x.data, 1-momentum))
w_norm = weight_pos.pow(2).sum(1, keepdim=True).pow(0.5)
updated_weight = weight_pos.div(w_norm)
memory.index_copy_(0, y, updated_weight)
return gradInput, None, None, None, None
class NCEAverage(nn.Module):
def __init__(self, inputSize, outputSize, K, T=0.07, momentum=0.5, Z=None):
super(NCEAverage, self).__init__()
self.nLem = outputSize
self.unigrams = torch.ones(self.nLem)
self.multinomial = AliasMethod(self.unigrams)
self.multinomial.cuda()
self.K = K
self.register_buffer('params',torch.tensor([K, T, -1, momentum]));
stdv = 1. / math.sqrt(inputSize/3)
self.register_buffer('memory', torch.rand(outputSize, inputSize).mul_(2*stdv).add_(-stdv))
def forward(self, x, y):
batchSize = x.size(0)
idx = self.multinomial.draw(batchSize * (self.K+1)).view(batchSize, -1)
out = NCEFunction.apply(x, y, self.memory, idx, self.params)
return out
| [
"torch.rand",
"torch.mul",
"torch.bmm",
"torch.ones",
"torch.tensor"
] | 1.5.0 | xmengli999/self_supervised | b2d40d452d203f60330c84fb213c3ba848468366 |
1.10 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import glob
import hashlib
import importlib
import os
import re
import shutil
import uuid
import torch
import torch.utils.cpp_extension
from torch.utils.file_baton import FileBaton
#----------------------------------------------------------------------------
# Global options.
verbosity = 'brief' # Verbosity level: 'none', 'brief', 'full'
#----------------------------------------------------------------------------
# Internal helper funcs.
def _find_compiler_bindir():
# patch here. To add VS 2022 x64 support.
patterns = [
'C:/Program Files (x86)/Microsoft Visual Studio/*/Professional/VC/Tools/MSVC/*/bin/Hostx64/x64',
'C:/Program Files (x86)/Microsoft Visual Studio/*/BuildTools/VC/Tools/MSVC/*/bin/Hostx64/x64',
'C:/Program Files (x86)/Microsoft Visual Studio/*/Community/VC/Tools/MSVC/*/bin/Hostx64/x64',
'C:/Program Files (x86)/Microsoft Visual Studio */vc/bin',
'C:/Program Files/Microsoft Visual Studio/*/Professional/VC/Tools/MSVC/*/bin/Hostx64/x64',
'C:/Program Files/Microsoft Visual Studio/*/BuildTools/VC/Tools/MSVC/*/bin/Hostx64/x64',
'C:/Program Files/Microsoft Visual Studio/*/Community/VC/Tools/MSVC/*/bin/Hostx64/x64',
'C:/Program Files/Microsoft Visual Studio */vc/bin',
]
for pattern in patterns:
matches = sorted(glob.glob(pattern))
if len(matches):
return matches[-1]
return None
#----------------------------------------------------------------------------
def _get_mangled_gpu_name():
name = torch.cuda.get_device_name().lower()
out = []
for c in name:
if re.match('[a-z0-9_-]+', c):
out.append(c)
else:
out.append('-')
return ''.join(out)
#----------------------------------------------------------------------------
# Main entry point for compiling and loading C++/CUDA plugins.
_cached_plugins = dict()
def get_plugin(module_name, sources, headers=None, source_dir=None, **build_kwargs):
assert verbosity in ['none', 'brief', 'full']
if headers is None:
headers = []
if source_dir is not None:
sources = [os.path.join(source_dir, fname) for fname in sources]
headers = [os.path.join(source_dir, fname) for fname in headers]
# Already cached?
if module_name in _cached_plugins:
return _cached_plugins[module_name]
# Print status.
if verbosity == 'full':
print(f'Setting up PyTorch plugin "{module_name}"...')
elif verbosity == 'brief':
print(f'Setting up PyTorch plugin "{module_name}"... ', end='', flush=True)
verbose_build = (verbosity == 'full')
# Compile and load.
try: # pylint: disable=too-many-nested-blocks
# Make sure we can find the necessary compiler binaries.
if os.name == 'nt' and os.system("where cl.exe >nul 2>nul") != 0:
compiler_bindir = _find_compiler_bindir()
if compiler_bindir is None:
raise RuntimeError(f'Could not find MSVC/GCC/CLANG installation on this computer. Check _find_compiler_bindir() in "{__file__}".')
os.environ['PATH'] += ';' + compiler_bindir
# Some containers set TORCH_CUDA_ARCH_LIST to a list that can either
# break the build or unnecessarily restrict what's available to nvcc.
# Unset it to let nvcc decide based on what's available on the
# machine.
os.environ['TORCH_CUDA_ARCH_LIST'] = ''
# Incremental build md5sum trickery. Copies all the input source files
# into a cached build directory under a combined md5 digest of the input
# source files. Copying is done only if the combined digest has changed.
# This keeps input file timestamps and filenames the same as in previous
# extension builds, allowing for fast incremental rebuilds.
#
# This optimization is done only in case all the source files reside in
# a single directory (just for simplicity) and if the TORCH_EXTENSIONS_DIR
# environment variable is set (we take this as a signal that the user
# actually cares about this.)
#
        # EDIT: We now do it regardless of TORCH_EXTENSIONS_DIR, in order to work
# around the *.cu dependency bug in ninja config.
#
all_source_files = sorted(sources + headers)
all_source_dirs = set(os.path.dirname(fname) for fname in all_source_files)
if len(all_source_dirs) == 1: # and ('TORCH_EXTENSIONS_DIR' in os.environ):
# Compute combined hash digest for all source files.
hash_md5 = hashlib.md5()
for src in all_source_files:
with open(src, 'rb') as f:
hash_md5.update(f.read())
# Select cached build directory name.
source_digest = hash_md5.hexdigest()
build_top_dir = torch.utils.cpp_extension._get_build_directory(module_name, verbose=verbose_build) # pylint: disable=protected-access
cached_build_dir = os.path.join(build_top_dir, f'{source_digest}-{_get_mangled_gpu_name()}')
if not os.path.isdir(cached_build_dir):
tmpdir = f'{build_top_dir}/srctmp-{uuid.uuid4().hex}'
os.makedirs(tmpdir)
for src in all_source_files:
shutil.copyfile(src, os.path.join(tmpdir, os.path.basename(src)))
try:
os.replace(tmpdir, cached_build_dir) # atomic
except OSError:
# source directory already exists, delete tmpdir and its contents.
shutil.rmtree(tmpdir)
if not os.path.isdir(cached_build_dir): raise
# Compile.
cached_sources = [os.path.join(cached_build_dir, os.path.basename(fname)) for fname in sources]
torch.utils.cpp_extension.load(name=module_name, build_directory=cached_build_dir,
verbose=verbose_build, sources=cached_sources, **build_kwargs)
else:
torch.utils.cpp_extension.load(name=module_name, verbose=verbose_build, sources=sources, **build_kwargs)
# Load.
module = importlib.import_module(module_name)
except:
if verbosity == 'brief':
print('Failed!')
raise
# Print status and add to cache dict.
if verbosity == 'full':
print(f'Done setting up PyTorch plugin "{module_name}".')
elif verbosity == 'brief':
print('Done.')
_cached_plugins[module_name] = module
return module
#----------------------------------------------------------------------------
| [
"torch.utils.cpp_extension.load",
"torch.cuda.get_device_name",
"torch.utils.cpp_extension._get_build_directory"
] | 1.10 | One-sixth/fid-helper-pytorch | 1d74e9e7e4622bd0ccb209a01a2cc10c74c73c01 |
1.4 | import os
import abc
import json
import logging
import time
from tempfile import NamedTemporaryFile
import numpy as np
import torch
import torch.distributed as dist
from pycocotools.cocoeval import COCOeval
from .distributed import synchronize, is_main_process, all_gather_container
# FIXME experimenting with speedups for OpenImages eval, it's slow
#import pyximport; py_importer, pyx_importer = pyximport.install(pyimport=True)
import effdet.evaluation.detection_evaluator as tfm_eval
#pyximport.uninstall(py_importer, pyx_importer)
_logger = logging.getLogger(__name__)
__all__ = ['CocoEvaluator', 'PascalEvaluator', 'OpenImagesEvaluator', 'RadioGalaxyEvaluator', 'create_evaluator']
class Evaluator:
def __init__(self, distributed=False, pred_yxyx=False, score_thresh=0.001):
self.distributed = distributed
self.distributed_device = None
self.pred_yxyx = pred_yxyx
self.img_indices = []
self.predictions = []
self.score_thresh = score_thresh
def add_predictions(self, detections, target):
if self.distributed:
if self.distributed_device is None:
# cache for use later to broadcast end metric
self.distributed_device = detections.device
synchronize()
detections = all_gather_container(detections)
img_indices = all_gather_container(target['img_idx'])
if not is_main_process():
return
else:
img_indices = target['img_idx']
detections = detections.cpu().numpy()
img_indices = img_indices.cpu().numpy()
for img_idx, img_dets in zip(img_indices, detections):
self.img_indices.append(img_idx)
self.predictions.append(img_dets)
def _coco_predictions(self):
# generate coco-style predictions
coco_predictions = []
coco_ids = []
for img_idx, img_dets in zip(self.img_indices, self.predictions):
img_id = self._dataset.img_ids[img_idx]
coco_ids.append(img_id)
if self.pred_yxyx:
# to xyxy
img_dets[:, 0:4] = img_dets[:, [1, 0, 3, 2]]
# to xywh
img_dets[:, 2] -= img_dets[:, 0]
img_dets[:, 3] -= img_dets[:, 1]
for det in img_dets:
score = float(det[4])
if score < self.score_thresh: # stop when below this threshold, scores in descending order
break
coco_det = dict(
image_id=int(img_id),
bbox=det[0:4].tolist(),
score=score,
category_id=int(det[5]))
coco_predictions.append(coco_det)
return coco_predictions, coco_ids
@abc.abstractmethod
def evaluate(self, output_result_file=''):
pass
def save(self, result_file):
# save results in coco style, override to save in a alternate form
if not self.distributed or dist.get_rank() == 0:
assert len(self.predictions)
coco_predictions, coco_ids = self._coco_predictions()
json.dump(coco_predictions, open(result_file, 'w'), indent=4)
class CocoEvaluator(Evaluator):
def __init__(self, dataset, distributed=False, pred_yxyx=False):
super().__init__(distributed=distributed, pred_yxyx=pred_yxyx)
self._dataset = dataset.parser
self.coco_api = dataset.parser.coco
def reset(self):
self.img_indices = []
self.predictions = []
def evaluate(self, output_result_file=''):
if not self.distributed or dist.get_rank() == 0:
assert len(self.predictions)
coco_predictions, coco_ids = self._coco_predictions()
if output_result_file:
json.dump(coco_predictions, open(output_result_file, 'w'), indent=4)
results = self.coco_api.loadRes(output_result_file)
else:
with NamedTemporaryFile(prefix='coco_', suffix='.json', delete=False, mode='w') as tmpfile:
json.dump(coco_predictions, tmpfile, indent=4)
results = self.coco_api.loadRes(tmpfile.name)
try:
os.unlink(tmpfile.name)
except OSError:
pass
coco_eval = COCOeval(self.coco_api, results, 'bbox')
coco_eval.params.imgIds = coco_ids # score only ids we've used
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
metric = coco_eval.stats[0] # mAP 0.5-0.95
if self.distributed:
dist.broadcast(torch.tensor(metric, device=self.distributed_device), 0)
else:
metric = torch.tensor(0, device=self.distributed_device)
dist.broadcast(metric, 0)
metric = metric.item()
self.reset()
return metric
class RadioGalaxyEvaluator(Evaluator):
def __init__(self, dataset, distributed=False, pred_yxyx=False, score_thresh=0.1):
super().__init__(distributed=distributed, pred_yxyx=pred_yxyx, score_thresh=score_thresh)
self._dataset = dataset.parser
self.coco_api = dataset.parser.coco
def reset(self):
self.img_indices = []
self.predictions = []
def evaluate(self, output_result_file=''):
if not self.distributed or dist.get_rank() == 0:
assert len(self.predictions)
coco_predictions, coco_ids = self._coco_predictions()
if output_result_file:
json.dump(coco_predictions, open(output_result_file, 'w'), indent=4)
results = self.coco_api.loadRes(output_result_file)
else:
with NamedTemporaryFile(prefix='coco_', suffix='.json', delete=False, mode='w') as tmpfile:
json.dump(coco_predictions, tmpfile, indent=4)
results = self.coco_api.loadRes(tmpfile.name)
try:
os.unlink(tmpfile.name)
except OSError:
pass
coco_eval = COCOeval(self.coco_api, results, 'bbox')
coco_eval.params.imgIds = coco_ids # score only ids we've used
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
metric = coco_eval.stats[0] # mAP 0.5-0.95
if self.distributed:
dist.broadcast(torch.tensor(metric, device=self.distributed_device), 0)
else:
metric = torch.tensor(0, device=self.distributed_device)
dist.broadcast(metric, 0)
metric = metric.item()
self.reset()
return metric
class TfmEvaluator(Evaluator):
""" Tensorflow Models Evaluator Wrapper """
def __init__(
self, dataset, distributed=False, pred_yxyx=False, evaluator_cls=tfm_eval.ObjectDetectionEvaluator):
super().__init__(distributed=distributed, pred_yxyx=pred_yxyx)
self._evaluator = evaluator_cls(categories=dataset.parser.cat_dicts)
self._eval_metric_name = self._evaluator._metric_names[0]
self._dataset = dataset.parser
def reset(self):
self._evaluator.clear()
self.img_indices = []
self.predictions = []
def evaluate(self, output_result_file=''):
if not self.distributed or dist.get_rank() == 0:
for img_idx, img_dets in zip(self.img_indices, self.predictions):
gt = self._dataset.get_ann_info(img_idx)
self._evaluator.add_single_ground_truth_image_info(img_idx, gt)
bbox = img_dets[:, 0:4] if self.pred_yxyx else img_dets[:, [1, 0, 3, 2]]
det = dict(bbox=bbox, score=img_dets[:, 4], cls=img_dets[:, 5])
self._evaluator.add_single_detected_image_info(img_idx, det)
metrics = self._evaluator.evaluate()
_logger.info('Metrics:')
for k, v in metrics.items():
_logger.info(f'{k}: {v}')
map_metric = metrics[self._eval_metric_name]
if self.distributed:
dist.broadcast(torch.tensor(map_metric, device=self.distributed_device), 0)
else:
map_metric = torch.tensor(0, device=self.distributed_device)
wait = dist.broadcast(map_metric, 0, async_op=True)
while not wait.is_completed():
# wait without spinning the cpu @ 100%, no need for low latency here
time.sleep(0.5)
map_metric = map_metric.item()
if output_result_file:
self.save(output_result_file)
self.reset()
return map_metric
class PascalEvaluator(TfmEvaluator):
def __init__(self, dataset, distributed=False, pred_yxyx=False):
super().__init__(
dataset, distributed=distributed, pred_yxyx=pred_yxyx, evaluator_cls=tfm_eval.PascalDetectionEvaluator)
class OpenImagesEvaluator(TfmEvaluator):
def __init__(self, dataset, distributed=False, pred_yxyx=False):
super().__init__(
dataset, distributed=distributed, pred_yxyx=pred_yxyx, evaluator_cls=tfm_eval.OpenImagesDetectionEvaluator)
def create_evaluator(name, dataset, distributed=False, pred_yxyx=False, score_thresh=0.001):
# FIXME support OpenImages Challenge2019 metric w/ image level label consideration
if 'coco' in name:
return CocoEvaluator(dataset, distributed=distributed, pred_yxyx=pred_yxyx)
elif 'openimages' in name:
return OpenImagesEvaluator(dataset, distributed=distributed, pred_yxyx=pred_yxyx)
elif 'radiogalaxy' in name:
return RadioGalaxyEvaluator(dataset, distributed=distributed, pred_yxyx=pred_yxyx, score_thresh=score_thresh)
else:
return PascalEvaluator(dataset, distributed=distributed, pred_yxyx=pred_yxyx)
| [
"torch.distributed.get_rank",
"torch.tensor",
"torch.distributed.broadcast"
] | 1.4.0 | SKA-INAF/efficientdet-pytorch | 8967bab88288d11e5547a7efa391adc0c987be47 |
1.5 | # Copyright (c) 2019 Eric Steinberger
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from PokerRL.rl.neural.CardEmbedding import CardEmbedding
from PokerRL.rl.neural.LayerNorm import LayerNorm
class MainPokerModuleFLAT_Baseline(nn.Module):
def __init__(self,
env_bldr,
device,
mpm_args,
):
super().__init__()
self._args = mpm_args
self._env_bldr = env_bldr
self._device = device
self._board_start = self._env_bldr.obs_board_idxs[0]
self._board_stop = self._board_start + len(self._env_bldr.obs_board_idxs)
self.dropout = nn.Dropout(p=mpm_args.dropout)
self.card_emb = CardEmbedding(env_bldr=env_bldr, dim=mpm_args.dim, device=device)
if mpm_args.deep:
self.cards_fc_1 = nn.Linear(in_features=self.card_emb.out_size * 2,
out_features=mpm_args.dim * 3)
self.cards_fc_2 = nn.Linear(in_features=mpm_args.dim * 3, out_features=mpm_args.dim * 3)
self.cards_fc_3 = nn.Linear(in_features=mpm_args.dim * 3, out_features=mpm_args.dim)
self.history_1 = nn.Linear(in_features=self._env_bldr.pub_obs_size - self._env_bldr.obs_size_board,
out_features=mpm_args.dim)
self.history_2 = nn.Linear(in_features=mpm_args.dim, out_features=mpm_args.dim)
self.comb_1 = nn.Linear(in_features=2 * mpm_args.dim, out_features=mpm_args.dim)
self.comb_2 = nn.Linear(in_features=mpm_args.dim, out_features=mpm_args.dim)
else:
self.layer_1 = nn.Linear(in_features=self.card_emb.out_size * 2
+ self._env_bldr.pub_obs_size - self._env_bldr.obs_size_board,
out_features=mpm_args.dim)
self.layer_2 = nn.Linear(in_features=mpm_args.dim, out_features=mpm_args.dim)
self.layer_3 = nn.Linear(in_features=mpm_args.dim, out_features=mpm_args.dim)
if self._args.normalize:
self.norm = LayerNorm(mpm_args.dim)
self.to(device)
# print("n parameters:", sum(p.numel() for p in self.parameters() if p.requires_grad))
@property
def output_units(self):
return self._args.dim
@property
def device(self):
return self._device
def forward(self, pub_obses, range_idxs):
"""
        Embeds both players' hole cards together with the board, concatenates the card
        embedding with the non-board part of the public observation, and feeds the result
        through fully-connected layers (flat, non-recurrent variant).
Args:
pub_obses (list): list of np arrays of shape [np.arr([history_len, n_features]), ...)
range_idxs (LongTensor): range_idxs (one for each pub_obs) tensor([2, 421, 58, 912, ...])
"""
if isinstance(pub_obses, list):
pub_obses = torch.from_numpy(np.array(pub_obses)).to(self._device, torch.float32)
hist_o = torch.cat([
pub_obses[:, :self._board_start],
pub_obses[:, self._board_stop:]
], dim=-1)
# """""""""""""""""""""""
# Card embeddings
# """""""""""""""""""""""
        range_idxs_0 = range_idxs // 10000  # Big hack! See LearnedBaselineSampler for the reverse op
range_idxs_1 = range_idxs % 10000
card_o_0 = self.card_emb(pub_obses=pub_obses,
range_idxs=torch.where(range_idxs_0 == 8888, torch.zeros_like(range_idxs_0),
range_idxs_0))
card_o_0 = torch.where(range_idxs_0.unsqueeze(1).expand_as(card_o_0) == 8888,
torch.full_like(card_o_0, fill_value=-1),
card_o_0,
)
card_o_1 = self.card_emb(pub_obses=pub_obses,
range_idxs=torch.where(range_idxs_1 == 8888, torch.zeros_like(range_idxs_1),
range_idxs_1))
card_o_1 = torch.where(range_idxs_1.unsqueeze(1).expand_as(card_o_0) == 8888,
torch.full_like(card_o_1, fill_value=-1),
card_o_1,
)
card_o = torch.cat([card_o_0, card_o_1], dim=-1)
# """""""""""""""""""""""
# Network
# """""""""""""""""""""""
if self._args.dropout > 0:
A = lambda x: self.dropout(F.relu(x))
else:
A = lambda x: F.relu(x)
if self._args.deep:
card_o = A(self.cards_fc_1(card_o))
card_o = A(self.cards_fc_2(card_o) + card_o)
card_o = A(self.cards_fc_3(card_o))
hist_o = A(self.history_1(hist_o))
hist_o = A(self.history_2(hist_o) + hist_o)
y = A(self.comb_1(torch.cat([card_o, hist_o], dim=-1)))
y = A(self.comb_2(y) + y)
else:
y = torch.cat([hist_o, card_o], dim=-1)
y = A(self.layer_1(y))
y = A(self.layer_2(y) + y)
y = A(self.layer_3(y) + y)
# """""""""""""""""""""""
# Normalize last layer
# """""""""""""""""""""""
if self._args.normalize:
y = self.norm(y)
return y
class MPMArgsFLAT_Baseline:
def __init__(self,
deep=True,
dim=128,
dropout=0.0,
normalize=True,
):
self.deep = deep
self.dim = dim
self.dropout = dropout
self.normalize = normalize
def get_mpm_cls(self):
return MainPokerModuleFLAT_Baseline
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.cat",
"torch.full_like",
"torch.zeros_like",
"torch.nn.functional.relu"
] | 1.5 | EricSteinberger/DREAM | bfe21bbb0f60ab27a1af9774308efbbbd41e68c4 |
1.8 | import torch
import unittest
from fusion.model import AE
class TestAE(unittest.TestCase):
def test_forward(self):
# define parameters
dim_in = 1
dim_l = 4
input_size = 32
architecture = 'DcganAutoEncoder'
architecture_params = dict(
input_size = input_size,
dim_in = [dim_in],
dim_h = 2,
dim_l = dim_l
)
sources = [0]
batch_size = 2
# create model
model = AE(sources, architecture, architecture_params)
# create input
x = [torch.rand(batch_size, dim_in, input_size, input_size)]
# forward pass
output = model(x)
# check outputs
for _, z in output.z.items():
self.assertEqual(z.size(1), dim_l)
self.assertEqual(output.attrs['x'][0].size(), x[0].size())
self.assertEqual(output.attrs['x_hat'][0].size(), x[0].size())
if __name__ == '__main__':
unittest.main()
| [
"torch.rand"
] | 1.8.1 | Mrinal18/fusion | 34e563f2e50139385577c3880c5de11f8a73f220 |
1.3 | import copy
import os
import torch
from torch import nn
from torch import optim
from ray.tune import Trainable
N_LBFGS_STEPS_VALIDATION = 15
class PytorchTrainable(Trainable):
"""Abstract Trainable class for Pytorch models, which checkpoints the model
and the optimizer.
Subclass must initialize self.model and self.optimizer in _setup.
"""
def _save(self, checkpoint_dir):
checkpoint_path = os.path.join(checkpoint_dir, "model_optimizer.pth")
state = {'model': self.model.state_dict(),
'optimizer': self.optimizer.state_dict()}
torch.save(state, checkpoint_path)
return checkpoint_path
def _restore(self, checkpoint_path):
if hasattr(self, 'device'):
checkpoint = torch.load(checkpoint_path, self.device)
else:
checkpoint = torch.load(checkpoint_path)
self.model.load_state_dict(checkpoint['model'])
self.optimizer.load_state_dict(checkpoint['optimizer'])
class TrainableFixedData(PytorchTrainable):
"""Abstract Trainable class for Pytorch models with fixed data.
Subclass must initialize self.model, self.optimizer, and
    self.n_steps_per_epoch in _setup, and has to implement self.loss().
"""
def loss(self):
raise NotImplementedError
def _train(self):
for _ in range(self.n_steps_per_epoch):
self.optimizer.zero_grad()
loss = self.loss()
loss.backward()
self.optimizer.step()
return {'negative_loss': -loss.item()}
class TrainableMatrixFactorization(TrainableFixedData):
"""Abstract Trainable class for Pytorch models that factor a target matrix.
Subclass must initialize self.model, self.optimizer,
self.n_steps_per_epoch, self.n_epochs_per_validation, self.target_matrix,
and self.input in _setup, and may override self.freeze() to freeze model
(e.g. taking argmax of logit instead of logit).
"""
def forward(self):
return self.model(self.input)
def loss(self):
# Take transpose since the transform acts on the rows of the input
output = self.forward().transpose(0, 1)
if self.target_matrix.dim() == 2 and output.dim() == 3: # Real target matrix, take real part
output = output[:, :, 0]
return nn.functional.mse_loss(output, self.target_matrix)
def freeze(self):
pass
def polish(self, nmaxsteps=50, patience=5, threshold=1e-10, save_to_self_model=False):
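        """Refine the current factorization with L-BFGS, stopping early once the loss has not improved
        by `threshold` for more than `patience` steps; by default the optimization runs on a deep copy
        so that self.model is left untouched."""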
if not save_to_self_model:
model_bak = self.model
self.model = copy.deepcopy(self.model)
self.freeze()
optimizer = optim.LBFGS(filter(lambda p: p.requires_grad, self.model.parameters()))
def closure():
optimizer.zero_grad()
loss = self.loss()
loss.backward()
return loss
n_bad_steps = 0
best_loss = float('inf')
for i in range(nmaxsteps):
loss = optimizer.step(closure)
if loss.item() < best_loss - threshold:
best_loss = loss.item()
n_bad_steps = 0
else:
n_bad_steps += 1
if n_bad_steps > patience:
break
if not save_to_self_model:
self.model = model_bak
return loss.item()
def _train(self):
for _ in range(self.n_steps_per_epoch):
self.optimizer.zero_grad()
loss = self.loss()
loss.backward()
self.optimizer.step()
loss = loss.item()
if (self._iteration + 1) % self.n_epochs_per_validation == 0:
loss = min(loss, self.polish(N_LBFGS_STEPS_VALIDATION, save_to_self_model=False))
return {'negative_loss': -loss, 'mean_loss': loss, 'nparameters': self.nparameters}
| [
"torch.nn.functional.mse_loss",
"torch.save",
"torch.load"
] | 1.3 | sfox14/butterfly | 13cc15cee5bdb7adaf376219aaf20fab0459e9ef |
1.6 | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Checkpointing with preceding recomputation.
PyTorch already provides the official checkpointing utilities in
:mod:`torch.utils.checkpoint`. The official checkpointing combines
recomputation and recursive backpropagation into one autograd function named
``CheckpointFunction``. Hence, the recomputation can be started only when the
gradients arrive to the function. In Pipe, the recomputation needs to precede
the gradient arrival to minimize the GPU idle time.
We solve this problem by introducing separate autograd functions named
:class:`Recompute` and :class:`Checkpoint`. Each function represents
recomputation and recursive backpropagation, respectively. We can manipulate
the control flow in aspect of both the autograd engine and CUDA with a pair of
the functions.
Specifically, we place CUDA stream synchronization between :class:`Recompute`
and :class:`Checkpoint` to delay only :class:`Checkpoint` until the gradient is
copied entirely.
"""
from collections import deque
from contextlib import contextmanager
import threading
from typing import TYPE_CHECKING, Deque, Generator, List, Optional, Tuple, Union
import torch
from torch import ByteTensor, Tensor
import torch.autograd
from .dependency import fork, join
from .microbatch import Batch
from .phony import get_phony
__all__ = ["is_checkpointing", "is_recomputing"]
Tensors = Tuple[Tensor, ...]
TensorOrTensors = Union[Tensor, Tensors]
# Types for shared memory between Checkpoint and Recompute.
Recomputed = Tuple[TensorOrTensors, Tensors] # (output, input_leaf)
RNGStates = Tuple[ByteTensor, Optional[ByteTensor]] # (cpu_rng_state, gpu_rng_state)
if TYPE_CHECKING:
from typing_extensions import Protocol
else:
Protocol = object
# Protocol with __call__ instead of Callable can be used as an attribute type.
# See: https://github.com/python/mypy/issues/708#issuecomment-561735949
class Function(Protocol):
def __call__(self, input: TensorOrTensors) -> TensorOrTensors:
...
class Checkpointing:
"""Generates a pair of :class:`Checkpoint` and :class:`Recompute`."""
def __init__(self, function: Function, batch: Batch) -> None:
self.function = function
self.batch = batch
# Shared memory between Checkpoint and Recompute. 1-length deque is
# used for mutability and length limitation.
self.recomputed: Deque[Recomputed] = deque(maxlen=1)
self.rng_states: Deque[RNGStates] = deque(maxlen=1)
def checkpoint(self) -> Batch:
"""Returns a batch applied by :class:`Checkpoint`."""
input_atomic = self.batch.atomic
input = tuple(self.batch)
# Use a phony which requires grad to ensure that Checkpoint can be
# tracked by the autograd engine even when none of the input tensors
# require grad.
phony = get_phony(self.batch[0].device, requires_grad=True)
output = Checkpoint.apply(phony, self.recomputed, self.rng_states, self.function, input_atomic, *input)
# Gradients are only supported for float Tensors.
if isinstance(output, tuple):
output = tuple([x if x.is_floating_point() else x.detach() for x in output])
return Batch(output, self.batch.index)
def recompute(self, batch: Batch) -> None:
"""Applies :class:`Recompute` to the batch in place."""
input_atomic = self.batch.atomic
input = tuple(self.batch)
# batch[0] is always requiring grad, because it has been passed
# checkpoint with a phony requiring grad.
batch[0], phony = fork(batch[0])
phony = Recompute.apply(phony, self.recomputed, self.rng_states, self.function, input_atomic, *input)
batch[0] = join(batch[0], phony)
class ThreadLocal(threading.local):
def __init__(self) -> None:
self.is_checkpointing = False
self.is_recomputing = False
thread_local = ThreadLocal()
@contextmanager
def enable_checkpointing() -> Generator[None, None, None]:
"""Makes :func:`is_checkpointing` return :data:`True` within a context."""
orig = thread_local.is_checkpointing
thread_local.is_checkpointing = True
try:
yield
finally:
thread_local.is_checkpointing = orig
@contextmanager
def enable_recomputing() -> Generator[None, None, None]:
"""Makes :func:`is_recomputing` return :data:`True` within a context."""
orig = thread_local.is_recomputing
thread_local.is_recomputing = True
try:
yield
finally:
thread_local.is_recomputing = orig
def is_checkpointing() -> bool:
"""Whether the current forward propagation is under checkpointing.
Returns:
bool: :data:`True` if it's under checkpointing.
"""
return thread_local.is_checkpointing
def is_recomputing() -> bool:
"""Whether the current forward propagation is under checkpoint
recomputation. Use this to prevent duplicated side-effects at forward
propagation::
class Counter(nn.Module):
def __init__(self):
super().__init__()
self.counter = 0
def forward(self, input):
if not is_recomputing():
self.counter += 1
return input
Returns:
bool: :data:`True` if it's under checkpoint recomputation.
.. seealso:: :ref:`Detecting Recomputation`
"""
return thread_local.is_recomputing
class Context:
"""The common interface between the :class:`Checkpoint` and
:class:`Recompute` context.
"""
recomputed: Deque[Recomputed]
rng_states: Deque[RNGStates]
function: Function
input_atomic: bool
saved_tensors: Tuple[Tensor, ...]
def save_for_backward(self, *tensors: Tensor) -> None: # pragma: no cover
pass
def save_rng_states(device: torch.device, rng_states: Deque[RNGStates],) -> None:
""":meth:`Checkpoint.forward` captures the current PyTorch's random number
generator states at CPU and GPU to reuse in :meth:`Recompute.backward`.
.. seealso:: :ref:`Referential Transparency`
"""
cpu_rng_state = torch.get_rng_state()
gpu_rng_state: Optional[ByteTensor]
if device.type == "cuda":
gpu_rng_state = torch.cuda.get_rng_state(device)
else:
gpu_rng_state = None
rng_states.clear()
rng_states.append((cpu_rng_state, gpu_rng_state))
@contextmanager
def restore_rng_states(device: torch.device, rng_states: Deque[RNGStates],) -> Generator[None, None, None]:
""":meth:`Recompute.backward` restores the random number generator states
captured by :func:`save_rng_states` within its context.
.. seealso:: :ref:`Referential Transparency`
"""
cpu_rng_state, gpu_rng_state = rng_states[0]
gpu_devices: List[torch.device] = []
if device.type == "cuda":
gpu_devices.append(device)
with torch.random.fork_rng(gpu_devices):
torch.set_rng_state(cpu_rng_state)
if gpu_rng_state is not None:
torch.cuda.set_rng_state(gpu_rng_state, device)
yield
class Checkpoint(torch.autograd.Function):
@staticmethod
# type: ignore
def forward(
ctx: Context,
phony: Tensor,
recomputed: Deque[Recomputed],
rng_states: Deque[RNGStates],
function: Function,
input_atomic: bool,
*input: Tensor,
) -> TensorOrTensors:
ctx.recomputed = recomputed
ctx.rng_states = rng_states
save_rng_states(input[0].device, ctx.rng_states)
ctx.function = function
ctx.input_atomic = input_atomic
ctx.save_for_backward(*input)
with torch.no_grad(), enable_checkpointing():
output = function(input[0] if input_atomic else input)
return output
@staticmethod
def backward(ctx: Context, *grad_output: Tensor,) -> Tuple[Optional[Tensor], ...]: # pragma: no cover
output, input_leaf = ctx.recomputed.pop()
if isinstance(output, tuple):
tensors = output
else:
tensors = (output,)
if any(y.requires_grad for y in tensors):
tensors = tuple([x for x in tensors if x.requires_grad])
torch.autograd.backward(tensors, grad_output)
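        # One None per auxiliary forward argument (phony, recomputed, rng_states, function, input_atomic);
        # gradients for the actual input tensors are appended below.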
grad_input: List[Optional[Tensor]] = [None, None, None, None, None]
grad_input.extend(x.grad for x in input_leaf)
return tuple(grad_input)
class Recompute(torch.autograd.Function):
@staticmethod
# type: ignore
def forward(
ctx: Context,
phony: Tensor,
recomputed: Deque[Recomputed],
rng_states: Deque[RNGStates],
function: Function,
input_atomic: bool,
*input: Tensor,
) -> Tensor:
ctx.recomputed = recomputed
ctx.rng_states = rng_states
ctx.function = function
ctx.input_atomic = input_atomic
ctx.save_for_backward(*input)
return phony
@staticmethod
def backward(ctx: Context, *grad_output: Tensor) -> Tuple[None, ...]: # pragma: no cover
input = ctx.saved_tensors
input_leaf = tuple(x.detach().requires_grad_(x.requires_grad) for x in input)
with restore_rng_states(input[0].device, ctx.rng_states):
with torch.enable_grad(), enable_recomputing():
output = ctx.function(input_leaf[0] if ctx.input_atomic else input_leaf)
ctx.recomputed.append((output, input_leaf))
grad_input: List[None] = [None, None, None, None, None]
grad_input.extend(None for _ in ctx.saved_tensors)
return tuple(grad_input)
| [
"torch.autograd.backward",
"torch.get_rng_state",
"torch.no_grad",
"torch.enable_grad",
"torch.cuda.set_rng_state",
"torch.cuda.get_rng_state",
"torch.set_rng_state",
"torch.random.fork_rng"
] | 1.6.0 | zhaojuanmao/fairscale | 61ece000bd1b70029270e2dccab66ffa2ca16d51 |
1.6 | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from threading import Event, Lock, Thread
from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast
import torch
from torch import nn
from torch.distributed import ProcessGroup, rpc
from torch.distributed.distributed_c10d import _get_global_rank
from fairscale.nn.model_parallel.initialize import get_pipeline_parallel_group
from .async_pipe import AsyncPipe
from .types import EVENT_LOOP_QUEUE, PipeMessage, TensorOrTensors
DEFAULT_MAX_SOURCE_POSITIONS = 1024
DEFAULT_MAX_TARGET_POSITIONS = 1024
PipeModel: AsyncPipe
PipeResult: TensorOrTensors
SizeOrSizes = Union[torch.Size, List[torch.Size]]
DtypeOrDtypes = Union[torch.dtype, List[torch.dtype]]
def set_device_based_on_group(group: ProcessGroup) -> None:
# torch.cuda.set_device(group.rank() % torch.cuda.device_count())
torch.cuda.set_device(torch.distributed.get_rank() % torch.cuda.device_count())
def get_shapes(tensor: TensorOrTensors) -> SizeOrSizes:
if isinstance(tensor, torch.Tensor):
return tensor.shape
else:
return [t.shape for t in tensor]
def get_dtype(tensor: TensorOrTensors) -> DtypeOrDtypes:
if isinstance(tensor, torch.Tensor):
return tensor.dtype
else:
return [t.dtype for t in tensor]
def get_global_ranks_from_group(group: ProcessGroup) -> List[int]:
return [_get_global_rank(group, r) for r in range(group.size())]
class PipeBackRedirect(torch.autograd.Function):
@staticmethod
# type: ignore
def forward(ctx, inputs, dest, event, message, transport, futures):
ctx.dest = dest
ctx.event = event
ctx.message = message
ctx.transport = transport
ctx.futures = futures
return inputs
@staticmethod
# type: ignore
def backward(ctx, *grad):
ctx.message.tensors = tuple(grad)
ctx.transport.send_message(ctx.message, sync=False, skip_header=True)
ctx.event.set()
# torch.futures.wait_all(ctx.futures)
return (None, None, None, None, None, None)
def callback_with_model(callback: Callable[[Any, AsyncPipe], None], ctx: Any) -> None:
try:
group = get_pipeline_parallel_group() # FIXME(tom) handle dynamic group
set_device_based_on_group(group)
with PipeModel.lock:
callback(ctx, PipeModel)
except Exception as e:
print(f"callback_with_model got {e}")
class PipeRPCWrapper(nn.Module):
"""A wrapper for Pipe to control the entire pipeline from a single process.
Typical usecase would have rank 0 construct `PipeRPCWrapper` and run the
training loop as normal, and all other ranks would call
`torch.distributed.rpc.shutdown()`
To run code on each worker, e.g. to run the optimizer, use `foreach_worker`
"""
def __init__(self, *args: Any, **kwargs: Any):
super().__init__()
self.group = cast(ProcessGroup, kwargs.get("group")) or get_pipeline_parallel_group()
assert self.group.rank() == 0
self.lock = Lock()
if True:
assert (
self.group == get_pipeline_parallel_group()
), "Can't pickle groups, so group must be `get_pipeline_parallel_group()`"
kwargs["group"] = None
else:
kwargs["group"] = self.group
kwargs["input_device"] = torch.device("cuda", torch.cuda.current_device())
self.model = AsyncPipe(*args, **kwargs)
self.worker_map = kwargs["worker_map"]
self._foreach_worker(self._register_remote_model, args=(args, kwargs))
self.model.cuda()
def _get_rpc_name(self, rank: int) -> str:
return self.worker_map[_get_global_rank(self.group, rank)]
def _foreach_worker(self, callback: Callable, args: Any = None) -> None:
futures = [rpc.rpc_async(self._get_rpc_name(rank), callback, args=args) for rank in range(1, self.group.size())]
futures = [f.wait() for f in futures]
def foreach_worker(
self, callback: Callable[[Any, AsyncPipe], None], ctx: Any = None, *, include_self: bool = False
) -> None:
"""Call `callback` on each worker with the `ctx` and model local to that
worker. e.g.
def register_optimizer(ctx, model):
args, kwargs = ctx
model.optimizer = torch.optim.SGD(model.parameters(), *args, **kwargs)
pipe_model = PipeRPCWrapper( ... )
pipe_model.foreach_worker(
register_optimizer,
([], {"lr" : 0.01, "momentum" : 0.9})
)
"""
self._foreach_worker(callback_with_model, args=(callback, ctx))
if include_self:
with self.model.lock:
callback(ctx, self.model)
def forward(self, tensor: TensorOrTensors) -> TensorOrTensors: # type: ignore
shape = get_shapes(tensor)
dtype = get_dtype(tensor)
if isinstance(tensor, torch.Tensor):
num_tensors = 1
else:
num_tensors = len(tensor)
futures = [
rpc.rpc_async(self._get_rpc_name(rank), self._model_forward, args=(self.model.training, shape, dtype))
for rank in range(1, self.group.size())
]
if self.model.final_stage:
return self.model(tensor)
else:
event = Event()
t = Thread(target=self._model_forward_first_stage, args=(tensor, event))
t.start()
shape, dtype = futures.pop().wait()
dest_rank = self.group.size() - 1
dest = self._get_rpc_name(dest_rank)
dest_global_rank = _get_global_rank(self.group, dest_rank)
src_global_rank = torch.distributed.get_rank()
queue = EVENT_LOOP_QUEUE
activations = PipeMessage(dest_global_rank, src_global_rank, queue_name=queue, tensor_count=num_tensors)
grads = PipeMessage(src_global_rank, dest_global_rank, queue_name=queue, tensor_count=num_tensors)
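            # `activations` will carry the final stage's output back to this rank, while `grads` will
            # carry the corresponding gradients back to the final stage during the backward pass.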
back_fut = rpc.rpc_async(
dest, self._send_result_and_do_backwards, args=(self.model.training, activations, grads)
)
futures.append(back_fut)
result = self._recv_result(self.model, shape, dtype, activations)
if isinstance(result, torch.Tensor):
result.requires_grad_()
else:
for r in result:
r.requires_grad_()
assert self.model.pipeline
return PipeBackRedirect.apply(
result, dest_global_rank, event, grads, self.model.pipeline.transport, futures
)
@property
def final_stage(self) -> bool:
return self.model.final_stage
@staticmethod
def _recv_result(
model: AsyncPipe, shapes: SizeOrSizes, dtypes: DtypeOrDtypes, message: PipeMessage
) -> TensorOrTensors:
group = get_pipeline_parallel_group()
set_device_based_on_group(group)
assert model.pipeline
transport = model.pipeline.transport
if isinstance(shapes, torch.Size):
message.tensor_shapes = [cast(torch.Size, shapes)]
message.tensor_dtypes = [cast(torch.dtype, dtypes)]
message = transport.recv_message_tensors(message)
return message.tensors[0]
else:
message.tensor_shapes = cast(List[torch.Size], shapes)
message.tensor_dtypes = cast(List[torch.dtype], dtypes)
message = transport.recv_message_tensors(message)
return message.tensors
@staticmethod
def _send_result_and_do_backwards(training: bool, message: PipeMessage, grads_message: PipeMessage) -> None:
group = get_pipeline_parallel_group()
set_device_based_on_group(group)
result = PipeResult
model = PipeModel
if isinstance(result, torch.Tensor):
result = tuple([result])
message.tensors = tuple(result)
assert model.pipeline
transport = model.pipeline.transport
transport.send_message(message, sync=False, skip_header=True)
if training:
grads_message.tensor_shapes = [r.shape for r in result]
grads_message.tensor_dtypes = [r.dtype for r in result]
grads_message = transport.recv_message_tensors(grads_message)
with model.lock:
torch.autograd.backward(result, grads_message.tensors, retain_graph=True)
@staticmethod
def _register_remote_model(args: List[Any], kwargs: Dict[str, Any]) -> None:
group = get_pipeline_parallel_group() # FIXME(tom) handle dynamic group
set_device_based_on_group(group)
kwargs["group"] = group
kwargs["input_device"] = torch.device("cuda", torch.cuda.current_device())
model = AsyncPipe(*args, **kwargs)
model.cuda()
global PipeModel
PipeModel = model
@staticmethod
def _model_forward(
training: bool, shape: torch.Size, dtype: torch.dtype
) -> Optional[Tuple[SizeOrSizes, DtypeOrDtypes]]:
try:
if isinstance(shape, torch.Size):
tensor = torch.empty(shape, dtype=dtype)
else:
tensor = tuple([torch.empty(s, dtype=d) for s, d in zip(shape, dtype)])
model = PipeModel
assert model.group
set_device_based_on_group(model.group)
model.train(training)
result = model(tensor)
if model.final_stage:
global PipeResult
PipeResult = result
return (get_shapes(result), get_dtype(result))
return None
except Exception as e:
print(f"_model_forward got {e}")
raise e
def _model_forward_first_stage(self, tensor: TensorOrTensors, event: Event) -> None:
try:
assert self.model.group
set_device_based_on_group(self.model.group)
self.model(tensor, event=event)
except Exception as e:
print(f"_model_forward got {e}")
raise e
| [
"torch.autograd.backward",
"torch.cuda.current_device",
"torch.cuda.device_count",
"torch.distributed.rpc.rpc_async",
"torch.distributed.get_rank",
"torch.empty",
"torch.distributed.distributed_c10d._get_global_rank"
] | 1.6.0 | zhaojuanmao/fairscale | 61ece000bd1b70029270e2dccab66ffa2ca16d51 |
1.6 | # coding=utf-8
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from fairscale.nn.model_parallel import initialize as mpu
from fairscale.nn.model_parallel import random
from fairscale.nn.model_parallel.random import get_cuda_rng_tracker, model_parallel_cuda_manual_seed
from fairscale.utils.testing import dist_init, spawn_for_all_world_sizes
def run_test_set_cuda_rng_state(rank, model_parallel_size, filename, filename_rpc):
dist_init(rank, model_parallel_size, filename, filename_rpc)
if torch.distributed.get_rank() == 0:
print("> testing set_rng_state with size {} ...".format(model_parallel_size))
mpu.initialize_model_parallel(model_parallel_size)
model_parallel_size = mpu.get_model_parallel_world_size()
size = 123
seed = 1234
torch.cuda.manual_seed(1234)
tensor = torch.cuda.FloatTensor(size)
# Get the state
rng_state = torch.cuda.get_rng_state()
rng_state_copy = rng_state.clone()
# Do some stuff.
for _ in range(5):
torch.randn(size, out=tensor)
result_1 = tensor.clone()
assert rng_state.sub(rng_state_copy).max() == 0
assert torch.cuda.get_rng_state().sub(rng_state_copy).max() > 0
# State should be different.
new_rng_state = torch.cuda.get_rng_state()
max_diff = new_rng_state.sub(rng_state).max()
print(
" max diff in rng state (should be non-zero) on global rank {}: {}".format(
torch.distributed.get_rank(), max_diff
)
)
assert max_diff > 0
# Reset the rng state and do the same stuff.
random._set_cuda_rng_state(rng_state)
for _ in range(5):
torch.randn(size, out=tensor)
random._set_cuda_rng_state(rng_state)
for _ in range(5):
torch.randn(size, out=tensor)
result_2 = tensor.clone()
# Results should be the same
error = result_2.sub(result_1).abs().max()
print(
" max error in generated tensors (should be zero) on global rank {}: {}".format(
torch.distributed.get_rank(), error
)
)
assert error < 1.0e-6
# Input state should have remained intact.
error = rng_state.sub(rng_state_copy).max()
print(
" max error in rng state (should be zero) on global rank {}: {}".format(torch.distributed.get_rank(), error)
)
assert error == 0
# Reset groups
mpu.destroy_model_parallel()
torch.distributed.barrier()
if torch.distributed.get_rank() == 0:
print(">> passed the test :-)")
def run_test_cuda_rng_tracker(rank, model_parallel_size, filename, filename_rpc):
dist_init(rank, model_parallel_size, filename, filename_rpc)
if torch.distributed.get_rank() == 0:
print("> testing cuda rng tracker with size {} ...".format(model_parallel_size))
mpu.initialize_model_parallel(model_parallel_size)
model_parallel_size = mpu.get_model_parallel_world_size()
seed_1 = 1234
seed_2 = 4321
size = [12, 21]
tensor = torch.cuda.FloatTensor(size)
# Set to seed_1 and generate two tensors.
torch.cuda.manual_seed(seed_1)
torch.randn(size, out=tensor)
target_11 = tensor.clone()
torch.randn(size, out=tensor)
target_12 = tensor.clone()
# Set to seed_2 and generate two tensors.
torch.cuda.manual_seed(seed_2)
torch.randn(size, out=tensor)
target_21 = tensor.clone()
torch.randn(size, out=tensor)
target_22 = tensor.clone()
# Now if we interleave seed_1 and seed_2,
# we should still get the same tensors
torch.cuda.manual_seed(seed_1)
get_cuda_rng_tracker().add("test", seed_2)
torch.randn(size, out=tensor)
result_11 = tensor.clone()
with get_cuda_rng_tracker().fork("test"):
torch.randn(size, out=tensor)
result_21 = tensor.clone()
torch.randn(size, out=tensor)
result_12 = tensor.clone()
with get_cuda_rng_tracker().fork("test"):
torch.randn(size, out=tensor)
result_22 = tensor.clone()
diff = result_11.sub(result_21).abs().max()
diff = min(diff, result_12.sub(result_22).abs().max())
print(
" max diff in generated tensors (should be non-zero) on global rank {}: {}".format(
torch.distributed.get_rank(), diff
)
)
assert diff > 1.0e-6
error = max(result_11.sub(target_11).abs().max(), result_12.sub(target_12).abs().max())
error = max(error, result_21.sub(target_21).abs().max())
error = max(error, result_22.sub(target_22).abs().max())
print(
" max error in generated tensors (should be zero) on global rank {}: {}".format(
torch.distributed.get_rank(), error
)
)
assert error < 1.0e-6
# Reset the tracker
get_cuda_rng_tracker().reset()
# Reset groups
mpu.destroy_model_parallel()
torch.distributed.barrier()
if torch.distributed.get_rank() == 0:
print(">> passed the test :-)")
def run_test_model_parallel_cuda_manual_seed(rank, model_parallel_size, filename, filename_rpc):
dist_init(rank, model_parallel_size, filename, filename_rpc)
if torch.distributed.get_rank() == 0:
print("> testing model parallel cuda manual seed with size {} ...".format(model_parallel_size))
mpu.initialize_model_parallel(model_parallel_size)
model_parallel_size = mpu.get_model_parallel_world_size()
model_parallel_cuda_manual_seed(12345)
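    # The default generator keeps the global seed, while inside the tracker each model-parallel rank
    # gets an offset seed (global seed + 2718 + model-parallel rank), as checked by the asserts below.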
assert torch.cuda.initial_seed() == 12345
with get_cuda_rng_tracker().fork():
assert torch.cuda.initial_seed() == (12345 + 2718 + mpu.get_model_parallel_rank())
# Reset the tracker
get_cuda_rng_tracker().reset()
# Reset groups
mpu.destroy_model_parallel()
torch.distributed.barrier()
if torch.distributed.get_rank() == 0:
print(">> passed the test :-)")
def test_set_cuda_rng_state():
spawn_for_all_world_sizes(run_test_set_cuda_rng_state)
def test_cuda_rng_tracker():
spawn_for_all_world_sizes(run_test_cuda_rng_tracker)
def test_model_parallel_cuda_manual_seed():
spawn_for_all_world_sizes(run_test_model_parallel_cuda_manual_seed)
| [
"torch.cuda.manual_seed",
"torch.cuda.get_rng_state",
"torch.cuda.initial_seed",
"torch.cuda.FloatTensor",
"torch.distributed.get_rank",
"torch.distributed.barrier",
"torch.randn"
] | 1.6.0 | zhaojuanmao/fairscale | 61ece000bd1b70029270e2dccab66ffa2ca16d51 |
1.6 | import torch
import torch.nn as nn
import random as rd
import sys
sys.path.insert(0, 'networks')
from Attention import TempAttention
from memory_rand import MemoryRamTwoStreamModule, MemoryRamModule, MMModule
class HME(nn.Module):
def __init__(self, vid_encoder, qns_encoder, ans_decoder, max_len_v, max_len_q, device, input_drop_p=0.2):
"""
Heterogeneous memory enhanced multimodal attention model for video question answering (CVPR19)
"""
super(HME, self).__init__()
self.vid_encoder = vid_encoder
self.qns_encoder = qns_encoder
self.ans_decoder = ans_decoder
dim = qns_encoder.dim_hidden
self.temp_att_a = TempAttention(dim * 2, dim * 2, hidden_dim=256)
self.temp_att_m = TempAttention(dim * 2, dim * 2, hidden_dim=256)
self.mrm_vid = MemoryRamTwoStreamModule(dim, dim, max_len_v, device)
self.mrm_txt = MemoryRamModule(dim, dim, max_len_q, device)
self.mm_module_v1 = MMModule(dim, input_drop_p, device)
self.linear_vid = nn.Linear(dim*2, dim)
self.linear_qns = nn.Linear(dim*2, dim)
self.linear_mem = nn.Linear(dim*2, dim)
self.vq2word_hme = nn.Linear(dim*3, dim*2)
self._init_weights()
self.device = device
def _init_weights(self):
"""
initialize the linear weights
:return:
"""
nn.init.xavier_normal_(self.linear_vid.weight)
nn.init.xavier_normal_(self.linear_qns.weight)
nn.init.xavier_normal_(self.linear_mem.weight)
nn.init.xavier_normal_(self.vq2word_hme.weight)
def forward(self, vid_feats, qns, qns_lengths, ans, ans_lengths, teacher_force_ratio=0.5, iter_num=3, mode='train'):
"""
"""
outputs_app_l1, outputs_app_l2, outputs_motion_l1, outputs_motion_l2 = self.vid_encoder(vid_feats) #(batch_size, fnum, feat_dim)
outputs_app = torch.cat((outputs_app_l1, outputs_app_l2), dim=-1)
outputs_motion = torch.cat((outputs_motion_l1, outputs_motion_l2), dim=-1)
batch_size, fnum, vid_feat_dim = outputs_app.size()
qns_output, qns_hidden = self.qns_encoder(qns, qns_lengths)
# print(qns_output.shape, qns_hidden[0].shape) #torch.Size([10, 23, 256]) torch.Size([2, 10, 256])
# qns_output = qns_output.permute(1, 0, 2)
batch_size, seq_len, qns_feat_dim = qns_output.size()
qns_embed = qns_hidden[0].permute(1, 0, 2).contiguous().view(batch_size, -1) #(batch_size, feat_dim)
# Apply temporal attention
att_app, beta_app = self.temp_att_a(qns_embed, outputs_app)
att_motion, beta_motion = self.temp_att_m(qns_embed, outputs_motion)
tmp_app_motion = torch.cat((outputs_app_l2[:, -1, :], outputs_motion_l2[:, -1, :]), dim=-1)
mem_output = torch.zeros(batch_size, vid_feat_dim).to(self.device)
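        # Heterogeneous memory: per sample, build a visual memory from the two-stream (appearance/motion)
        # features and a text memory from the unpadded question encoding, then fuse them over iter_num iterations.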
for bs in range(batch_size):
mem_ram_vid = self.mrm_vid(outputs_app_l2[bs], outputs_motion_l2[bs], fnum)
cur_qns = qns_output[bs][:qns_lengths[bs]]
mem_ram_txt = self.mrm_txt(cur_qns, qns_lengths[bs]) #should remove padded zeros
mem_output[bs] = self.mm_module_v1(tmp_app_motion[bs].unsqueeze(0), mem_ram_vid, mem_ram_txt, iter_num)
"""
(64, 256) (22, 256) (1, 512)
"""
app_trans = torch.tanh(self.linear_vid(att_app))
motion_trans = torch.tanh(self.linear_vid(att_motion))
mem_trans = torch.tanh(self.linear_mem(mem_output))
encoder_outputs = torch.cat((app_trans, motion_trans, mem_trans), dim=1)
decoder_inputs = self.vq2word_hme(encoder_outputs)
hidden = qns_hidden
if mode == 'train':
vocab_size = self.ans_decoder.vocab_size
ans_len = ans.shape[1]
input = ans[:, 0]
outputs = torch.zeros(batch_size, ans_len, vocab_size).to(self.device)
for t in range(0, ans_len):
output, hidden = self.ans_decoder(decoder_inputs, hidden, input)
outputs[:, t] = output
teacher_force = rd.random() < teacher_force_ratio
top1 = output.argmax(1)
input = ans[:, t] if teacher_force else top1
else:
start = torch.LongTensor([1] * batch_size).to(self.device)
outputs = self.ans_decoder.sample(decoder_inputs, hidden, start)
return outputs | [
"torch.nn.Linear",
"torch.zeros",
"torch.cat",
"torch.LongTensor",
"torch.nn.init.xavier_normal_"
] | 1.6.0 | doc-doc/NExT-OE | a45d81a48ab5ccc45ff6f7bea60597cc59bc546e |
1.5 | from torch import nn
from torch.autograd import Variable
import torch
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import numpy as np
# from models.vgg_tro_channel1 import vgg16_bn
from models.vgg_tro_channel3 import vgg16_bn, vgg19_bn
# torch.cuda.set_device(1)
DROP_OUT = False
LSTM = False
SUM_UP = True
PRE_TRAIN_VGG = True
class Encoder(nn.Module):
def __init__(self, hidden_size, height, width, bgru, step, flip):
super(Encoder, self).__init__()
self.hidden_size = hidden_size
self.height = height
self.width = width
self.bi = bgru
self.step = step
self.flip = flip
self.n_layers = 2
self.dropout = 0.5
# self.layer = vgg16_bn(PRE_TRAIN_VGG)
self.layer = vgg19_bn(PRE_TRAIN_VGG)
if DROP_OUT:
self.layer_dropout = nn.Dropout2d(p=0.5)
if self.step is not None:
# self.output_proj = nn.Linear((((((self.height-2)//2)-2)//2-2-2-2)//2)*128*self.step, self.hidden_size)
self.output_proj = nn.Linear(
self.height // 16 * 512 * self.step, self.height // 16 * 512
)
if LSTM:
RNN = nn.LSTM
else:
RNN = nn.GRU
if self.bi: # 8: 3 MaxPool->2**3 128: last hidden_size of layer4
self.rnn = RNN(
self.height // 16 * 512,
self.hidden_size,
self.n_layers,
dropout=self.dropout,
bidirectional=True,
)
if SUM_UP:
self.enc_out_merge = (
lambda x: x[:, :, : x.shape[-1] // 2] + x[:, :, x.shape[-1] // 2 :]
)
self.enc_hidden_merge = lambda x: (x[0] + x[1]).unsqueeze(0)
else:
self.rnn = RNN(
self.height // 16 * 512,
self.hidden_size,
self.n_layers,
dropout=self.dropout,
bidirectional=False,
)
# (32, 1, 80, 1400)
def forward(self, in_data, in_data_len, hidden=None):
batch_size = in_data.shape[0]
out = self.layer(in_data) # torch.Size([32, 512, 4, 63])
if DROP_OUT and self.training:
out = self.layer_dropout(out)
# out.register_hook(print)
out = out.permute(3, 0, 2, 1) # (width, batch, height, channels)
out.contiguous()
# out = out.view(-1, batch_size, (((((self.height-2)//2)-2)//2-2-2-2)//2)*128) # (t, b, f) (173, 32, 1024)
out = out.reshape(-1, batch_size, self.height // 16 * 512)
if self.step is not None:
time_step, batch_size, n_feature = out.shape[0], out.shape[1], out.shape[2]
out_short = Variable(
torch.zeros(time_step // self.step, batch_size, n_feature * self.step)
).cuda() # t//STEP, b, f*STEP
for i in range(0, time_step // self.step):
part_out = [out[j] for j in range(i * self.step, (i + 1) * self.step)]
# reverse the image feature map
out_short[i] = torch.cat(part_out, 1) # b, f*STEP
out = self.output_proj(out_short) # t//STEP, b, hidden_size
width = out.shape[0]
src_len = in_data_len.numpy() * (width / self.width)
src_len = src_len + 0.999 # in case of 0 length value from float to int
src_len = src_len.astype("int")
out = pack_padded_sequence(out, src_len.tolist(), batch_first=False)
output, hidden = self.rnn(out, hidden)
# output: t, b, f*2 hidden: 2, b, f
output, output_len = pad_packed_sequence(output, batch_first=False)
if self.bi and SUM_UP:
output = self.enc_out_merge(output)
# hidden = self.enc_hidden_merge(hidden)
# # output: t, b, f hidden: b, f
odd_idx = [1, 3, 5, 7, 9, 11]
hidden_idx = odd_idx[: self.n_layers]
final_hidden = hidden[hidden_idx]
# if self.flip:
# hidden = output[-1]
# #hidden = hidden.permute(1, 0, 2) # b, 2, f
# #hidden = hidden.contiguous().view(batch_size, -1) # b, f*2
# else:
# hidden = output[0] # b, f*2
return output, final_hidden # t, b, f*2 b, f*2
# matrix: b, c, h, w lens: list size of batch_size
def conv_mask(self, matrix, lens):
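        """Zero out feature-map columns beyond each sample's rescaled width; if self.flip is set,
        the last `le` columns are kept instead of the first."""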
lens = np.array(lens)
width = matrix.shape[-1]
lens2 = lens * (width / self.width)
lens2 = lens2 + 0.999 # in case le == 0
lens2 = lens2.astype("int")
matrix_new = matrix.permute(0, 3, 1, 2) # b, w, c, h
matrix_out = Variable(torch.zeros(matrix_new.shape)).cuda()
for i, le in enumerate(lens2):
if self.flip:
matrix_out[i, -le:] = matrix_new[i, -le:]
else:
matrix_out[i, :le] = matrix_new[i, :le]
matrix_out = matrix_out.permute(0, 2, 3, 1) # b, c, h, w
return matrix_out
if __name__ == "__main__":
print(vgg16_bn())
| [
"torch.nn.Linear",
"torch.zeros",
"torch.cat",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.nn.Dropout2d"
] | 1.5.1 | MattAlexMiracle/SmartPatch | c485cb433d8e085d6eae10a335ee19f5e6c1a41c |
1.7 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 3 13:23:59 2021
@author: th
"""
import torch
from torch.nn import ReLU, Linear, Softmax, SmoothL1Loss, Tanh, LeakyReLU
from torch_geometric.nn import GCNConv, global_max_pool, global_mean_pool, SGConv, GNNExplainer, SAGEConv, GATConv, FastRGCNConv, GraphConv
import numpy as np
import matplotlib.pyplot as plt
import sys
import torch.nn.functional as F
import torch_optimizer as optim
import gnn_torch_models
import random
from sklearn.preprocessing import StandardScaler as SS
# torch.set_default_dtype(torch.float)
def standardscaler_transform(sc_feat_pure):
scaler = SS()
scaler.fit(sc_feat_pure)
transformed=scaler.transform(sc_feat_pure)
return transformed, scaler
def batch_split(nodes_cp, full_index, ii):
test_x = nodes_cp[ii]
train_idx=np.setxor1d(full_index, ii)
train_x = nodes_cp[train_idx]
if(len(train_x[0].shape)==1):
train_concat = flatten_list_1d(train_x)
else:
train_concat = []
for jj, x in enumerate(train_x):
if(jj==0):
train_concat = x
else:
train_concat= np.vstack((train_concat, x))
return train_concat, test_x
def make_diag_batch_FC(FCs):
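    """Stack the given FC matrices into one block-diagonal matrix so that the graphs in a batch
    stay disconnected from each other."""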
count=0
for FC in FCs:
count+=FC.shape[0]
#gen mat
batch_FC = np.zeros((count,count))
size_log = 0
for FC in FCs:
size = FC.shape[0]
batch_FC[size_log:size_log+size, size_log:size_log+size]=FC
size_log += size
return batch_FC
def flatten_list_1d(act_ratio):
ph = np.empty((1,0))
ph = np.squeeze(ph)
for entry in act_ratio:
ph = np.concatenate((ph, entry))
return ph
def batch_split_x(nodes_cp, full_index, ii, chip_ids):
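    """Hold out entry ii as the test set and build the training matrix from all entries whose index
    is not in chip_ids (i.e. excluding recordings from the same chip)."""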
nodes_cp = np.array(nodes_cp)
test_x = nodes_cp[ii]
train_idx=np.setxor1d(full_index, chip_ids)
train_x = nodes_cp[train_idx]
if(len(train_x[0].shape)==1):
train_concat = flatten_list_1d(train_x)
else:
train_concat = []
for jj, x in enumerate(train_x):
if(jj==0):
train_concat = x
else:
train_concat= np.vstack((train_concat, x))
return train_concat, test_x
def evaluate(out, labels):
"""
    Calculates the mean squared error between the prediction and the ground truth.
    :param out: predicted outputs of the explainer
    :param labels: ground truth of the data
    :returns: scalar tensor, mean squared error
"""
acc = torch.mean(torch.square(out-labels))
return acc
def evaluate_mae(out, labels):
"""
    Calculates the mean absolute error between the prediction and the ground truth.
    :param out: predicted outputs of the explainer
    :param labels: ground truth of the data
    :returns: scalar tensor, mean absolute error
"""
acc = torch.mean(torch.abs(out-labels))
return acc
def evaluate_acc(out, labels):
"""
Calculates the accuracy between the prediction and the ground truth.
:param out: predicted outputs of the explainer
:param labels: ground truth of the data
:returns: int accuracy
"""
out_cl = torch.max(out,1)[1]
lab_cl = torch.max(labels,1)[1]
diff_sum = torch.sum(torch.abs(out_cl-lab_cl))
acc = 1- (diff_sum/out.shape[0])
return acc
def gen_gridparams(dropout_probs, learning_rates, weight_decays, hidden_dims):
fit_param_list = []
for prob in dropout_probs:
for rate in learning_rates:
for decay in weight_decays:
for hd in hidden_dims:
fit_params= dict()
fit_params['dropout_prob']=prob
fit_params['learning_rate']=rate
fit_params['weight_decay']=decay
fit_params['hidden_dims']=hd
fit_param_list.append(fit_params)
return fit_param_list
def run_gridsearch_batch_x(nodes, FCs, target_frs, epoch_n, iter_n, model_string, fit_param_list, device, chip_ids):
fit_result=[]
for entry in fit_param_list:
fit_params= dict()
fit_params['dropout_prob']=entry['dropout_prob']
fit_params['learning_rate']=entry['learning_rate']
fit_params['weight_decay']=entry['weight_decay']
fit_params['hidden_dims']=entry['hidden_dims']
fit_params['fit_result']=run_GNN_batch_x(nodes, FCs, target_frs, epoch_n, iter_n, model_string, fit_params, device, chip_ids, 1)
fit_result.append(fit_params)
return fit_result
def standard_scale(features,train_idx, validate_idx, test_idx):
features_wip = np.copy(features)
if(len(features_wip.shape)==1):
X_train, X_scaler = standardscaler_transform(features_wip[train_idx].reshape(-1,1))
X_validate = X_scaler.transform(features_wip[validate_idx].reshape(-1,1))
X_test = X_scaler.transform(features_wip[test_idx].reshape(-1,1))
features_wip[train_idx] = np.squeeze(X_train)
features_wip[validate_idx] = np.squeeze(X_validate)
features_wip[test_idx] = np.squeeze(X_test)
else:
X_train, X_scaler = standardscaler_transform(features_wip[train_idx, :])
X_validate = X_scaler.transform(features_wip[validate_idx, :])
X_test = X_scaler.transform(features_wip[test_idx, :])
features_wip[train_idx, :] = X_train
features_wip[validate_idx, :] = X_validate
features_wip[test_idx, :] = X_test
return features_wip
def make_rgcn_mat(train_FC, device):
edge_idx = np.array(np.where(train_FC!=0))
edge_idx = torch.tensor(edge_idx, device= device)
edge_type = train_FC[np.where(train_FC!=0)]
types = np.unique(edge_type)
edge_class = np.squeeze(np.zeros((edge_type.shape[0],1)))
for jj, typ in enumerate(types):
idx = np.where(edge_type==typ)[0]
edge_class[idx]=jj
edge_weight = torch.tensor(edge_class, device=device).type(torch.LongTensor)
return edge_idx, edge_weight
def match_network_param(sage_params_uniq, chip_ids):
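    """Expand hyper-parameters defined per unique chip into per-network lists, repeating each chip's
    values for every network recorded on that chip (ordered by first appearance)."""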
uniq_chip = np.unique(chip_ids)
uniq_indices=[]
for uniq_c in uniq_chip:
indices = np.where(np.array(chip_ids)==uniq_c)[0]
uniq_indices.append(indices[0])
sage_params = dict()
for k,v in sage_params_uniq.items():
sage_params[k] = []
# get the sequence straight
seq = np.argsort(uniq_indices)
for k,v in sage_params_uniq.items():
for zz, idx in enumerate(seq):
st_p=uniq_indices[idx]
n_same = len(np.where(np.array(chip_ids)==np.array(chip_ids[st_p]))[0])
for _ in range(n_same):
sage_params[k].append(sage_params_uniq[k][zz])
return sage_params
def run_GNN_batch_x(nodes, FCs, target_frs, n_epoch, iter_n, model_string, fit_params_list, device, chip_ids, gridsearch=0):
# compute GCN assuming same nodes
#seeds
np.random.seed(42)
random.seed(42)
num_features= nodes[0].shape[1]
#number of classes
if(len(target_frs[0].shape)==1):
num_classes=1
else:
num_classes = target_frs[0].shape[1]
per_network=[]
for ii in range(len(target_frs)):
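        # Leave-one-network-out: network ii is the test set; the remaining networks form the training
        # set (networks from the same chip are also excluded when not grid-searching).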
train_acc_vec=[]
train_mae_vec=[]
model_params_vec=[]
test_acc_vec=[]
test_mae_vec=[]
validate_curves_list =[]
train_curves_list=[]
# prep x,y
target_cp = np.copy(target_frs)
full_index= np.arange(len(target_frs))
#get target y first
test_y = target_cp[ii]
# make x
nodes_cp = np.copy(nodes)
# FC
FC_cp = np.copy(FCs)
#params
if(gridsearch==0):
fit_params = fit_params_list[ii]
else:
fit_params = fit_params_list
for iter_ in range(iter_n):
# targets
test_y = target_cp[ii]
# val_y = target_cp[val_idx]
#get idx from same chips
same_chip = np.where(np.array(chip_ids) == chip_ids[ii])[0]
if(gridsearch==0):
train_idx=np.setxor1d(full_index, same_chip) # got rid of it
else:
train_idx = np.setxor1d(full_index, ii)
train_y = target_cp[train_idx]
train_y = flatten_list_1d(train_y)
# make x
#features (input)
if(gridsearch==0):
train_x, test_x= batch_split_x(nodes_cp, full_index, ii, same_chip) #identical function to wp1_data_description, wp1_data class
else:
train_x, test_x= batch_split(nodes_cp, full_index, ii)
#stack train and val for scaling
#scale them
scaled_x, train_scaler_x=standardscaler_transform(train_x)
test_x = train_scaler_x.transform(test_x)
train_x = train_scaler_x.transform(train_x)
# val_x = train_scaler_x.transform(val_x)
# scale y
scaled_y, train_scaler_y=standardscaler_transform(train_y.reshape(-1,1))
train_y = train_scaler_y.transform(train_y.reshape(-1,1))
test_y = train_scaler_y.transform(test_y.reshape(-1,1))
# val_y = train_scaler_y.transform(val_y.reshape(-1,1))
# FCs
train_FC= make_diag_batch_FC(FC_cp[train_idx])
test_FC = FC_cp[ii]
# put into cuda
train_x = torch.tensor(train_x, device = device, dtype=float)
train_y = torch.tensor(train_y, device = device, dtype=float)
test_x = torch.tensor(test_x, device = device, dtype=float)
test_y = torch.tensor(test_y, device = device, dtype=float)
if(num_classes==1):
train_y = torch.reshape(train_y, (train_y.shape[0], 1))
test_y = torch.reshape(test_y, (test_y.shape[0], 1))
edge_idx= dict()
edge_weight =dict()
edge_idx['train'] = np.array(np.where(train_FC>0))
edge_idx['train'] = torch.tensor(edge_idx['train'], device = device)
edge_weight['train'] = train_FC[np.where(train_FC>0)]
edge_weight['train'] = torch.tensor(edge_weight['train'], device = device, dtype=float)
#prep for testing
edge_idx['test'] = np.array(np.where(test_FC>0))
edge_idx['test'] = torch.tensor(edge_idx['test'], device = device)
edge_weight['test'] = test_FC[np.where(test_FC>0)]
edge_weight['test'] = torch.tensor(edge_weight['test'], device = device, dtype=float)
model = gnn_torch_models.return_model(model_string, num_features, num_classes, fit_params['dropout_prob'], fit_params['hidden_dims'])
model.to(device, dtype=float)
if('rgcn' in model_string):
edge_idx= dict()
edge_weight =dict()
edge_idx['train'], edge_weight['train'] = make_rgcn_mat(train_FC, device)
edge_idx['test'], edge_weight['test'] = make_rgcn_mat(test_FC, device)
edge_idx['train'] = torch.tensor(edge_idx['train'], device= device)
edge_idx['test'] = torch.tensor(edge_idx['test'], device= device)
edge_weight['train'] = torch.tensor(edge_weight['train'], device= device, dtype=float)
edge_weight['test'] = torch.tensor(edge_weight['test'], device= device, dtype=float)
# edge_idx['val'], edge_weight['val'] = make_rgcn_mat(val_FC, device)
optimizer = torch.optim.Adam(model.parameters(), lr=fit_params['learning_rate'], weight_decay= fit_params['weight_decay'])
if(model_string == 'gcn_class'):
criterion = torch.nn.CrossEntropyLoss()
else:
criterion = torch.nn.MSELoss()
train_acc_curve=[]
validate_acc_curve=[]
#epochs
if(gridsearch==0):
n_epoch = fit_params['bs_epoch']
for epoch in range(n_epoch):
model.train()
optimizer.zero_grad()
out = model.forward(train_x, edge_idx['train'], edge_weight['train']) # forward
loss = criterion(out, train_y)
loss.backward()
# torch.nn.utils.clip_grad_norm_(model.parameters(), 2.0)
optimizer.step()
# eval flag
model.eval()
with torch.no_grad():
out=dict()
out['train'] = model(train_x, edge_idx['train'], edge_weight['train'])
out['test'] = model(test_x, edge_idx['test'], edge_weight['test'])
# out['test'] = model(test_x, edge_idx['test'], edge_weight['test'])
# Evaluate train
mse=dict()
mae=dict()
mse['train'] = evaluate(out['train'], train_y)
mae['train'] = evaluate_mae(out['train'], train_y)
mse['test'] = evaluate(out['test'], test_y)
mae['test'] = evaluate_mae(out['test'], test_y)
if(epoch% 50==0):
print(f"Epoch: {epoch}, train_acc: {mse['train']:.4f}, validate_acc : {mse['test']:.4f}, LR : {optimizer.param_groups[0]['lr']:.8f}")
train_acc_curve.append(mse['train'].cpu().numpy())
validate_acc_curve.append(mse['test'].cpu().numpy())
# for each iter
train_acc_vec.append(mse['train'].cpu().numpy())
train_mae_vec.append(mae['train'].cpu().numpy())
validate_curves_list.append(np.array(validate_acc_curve))
train_curves_list.append(np.array(train_acc_curve))
model_dict=dict()
for k,v in model.state_dict().items():
model_dict[k] =v.cpu()
if(gridsearch==0):
model_params_vec.append(model_dict)
# test
with torch.no_grad():
out['test'] = model(test_x, edge_idx['test'], edge_weight['test'])
mse['test'] = evaluate(out['test'], test_y)
mae['test'] = evaluate_mae(out['test'], test_y)
print(f"iteration: {iter_}, test_acc: {mse['test']:.4f}")
test_acc_vec.append(mse['test'].cpu().numpy())
test_mae_vec.append(mae['test'].cpu().numpy())
result = dict()
result['mse_train']=np.array(train_acc_vec)
result['mae_train']=np.array(train_mae_vec)
result['mse_test']= np.array(test_acc_vec)
result['mae_test'] = np.array(test_mae_vec)
result['train_curve']=train_curves_list
result['validate_curve']=validate_curves_list
per_network.append(result)
return per_network
def simple_forward_model(model, input_vec, adj_mat, cuda, gpu_id):
x = torch.tensor(input_vec)
# lab_out = torch.tensor(target_vec)
# lab_out = torch.reshape(lab_out, (adj_mat.shape[0], 1))
edge_idx = np.array(np.where(adj_mat>0))
edge_idx = torch.tensor(edge_idx)
edge_weight = adj_mat[np.where(adj_mat>0)]
edge_weight = torch.tensor(edge_weight)
if(cuda):
# lab_out=lab_out.cuda(gpu_id)
x = x.cuda(gpu_id)
edge_idx = edge_idx.cuda(gpu_id)
edge_weight=edge_weight.cuda(gpu_id)
model = model.cuda(gpu_id)
with torch.no_grad():
out = model.forward(x, edge_idx, edge_weight)
return out.cpu().detach().numpy()
| [
"torch.nn.CrossEntropyLoss",
"torch.reshape",
"torch.abs",
"torch.tensor",
"torch.max",
"torch.square",
"torch.nn.MSELoss",
"torch.no_grad"
] | 1.7.1 | arahangua/gnn_prediction_sn | 3b3b8da07ee920c94f1a88fab87472860eec6322 |
1.9 | import os
import json
import random
import argparse
from tqdm import tqdm
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.nn.init as weight_init
from torch.utils.data import Dataset, DataLoader
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
class OutlierDataset(Dataset):
def __init__(self, X):
self.X = X.astype('float')
def __getitem__(self, index):
x = self.X[index, :]
x = torch.tensor(x, dtype=torch.float32)
return index, x
def __len__(self):
return len(self.X)
class Model(nn.Module):
def __init__(self, input_size, dropout=0.5):
super(Model, self).__init__()
self.dropout = dropout
if self.dropout > 0:
self.dropout = nn.Dropout(dropout)
self.encode_w1 = nn.Linear(input_size, 64)
self.encode_w2 = nn.Linear(64, 32)
self.decode_w1 = nn.Linear(32, 64)
self.decode_w2 = nn.Linear(64, input_size)
def encoder(self, x):
x = self.encode_w1(x)
x = torch.relu(x)
x = self.encode_w2(x)
x = torch.relu(x)
if self.dropout:
x = self.dropout(x)
return x
def decoder(self, x):
x = self.decode_w1(x)
x = torch.relu(x)
x = self.decode_w2(x)
return x
def forward(self, x):
x = self.encoder(x)
x = self.decoder(x)
return x
class Detector(object):
def __init__(
self,
lr=3e-3,
weight_decay=1e-5,
batch_size=128,
epochs=10
):
self.lr = lr
self.weight_decay = weight_decay
self.batch_size = batch_size
self.epochs = epochs
self.threshold = 0.5
def cal_recon_err(self, preds, targets):
recon_err = F.mse_loss(preds, targets, reduction='none').mean(axis=-1)
return recon_err
def cal_loss(self, preds, targets):
loss_mse = self.cal_recon_err(preds, targets)
return loss_mse.mean()
def run_batch(self, batch, train):
idx, x = batch
inputs = x.to(DEVICE)
outputs = self.model(inputs)
if train:
self.optimizer.zero_grad()
train_err = self.cal_recon_err(outputs, inputs)
loss = train_err.mean()
loss.backward()
self.optimizer.step()
        else:
            loss = self.cal_loss(outputs, inputs)
            # also compute the per-sample errors here so the value returned below is defined in eval mode
            train_err = self.cal_recon_err(outputs, inputs)
loss = loss.item()
bsz = inputs.size(0)
return loss * bsz, bsz, train_err.detach().cpu().tolist()
def train(self, epoch=None):
self.model.train()
total_loss = 0
total_cnt = 0
train_errs = []
for batch_idx, batch in enumerate(self.train_iter):
loss, bsz, train_err = self.run_batch(batch, train=True)
total_loss += loss
total_cnt += bsz
train_errs += train_err
status = {'total_loss':total_loss/total_cnt}
mean = np.mean(train_errs)
std = np.std(train_errs)
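        # Outlier threshold: samples whose reconstruction error exceeds mean + 2*std of the
        # training errors will be flagged by extract().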
self.threshold = mean + 2*std
return status
def get_model(self, input_size):
self.model = Model(input_size=input_size).to(DEVICE)
self.optimizer = optim.Adam(self.model.parameters(),
lr=self.lr,
weight_decay=self.weight_decay)
def fit(self, X):
dataset = OutlierDataset(X)
self.train_iter = DataLoader(dataset=dataset,
batch_size=self.batch_size,
shuffle=True)
self.get_model(X.shape[1])
wait = 0
best_loss = 1e9
iteration = tqdm(range(1, self.epochs + 1))
for epoch in iteration:
epoch_status = self.train(epoch)
if best_loss > epoch_status['total_loss']:
best_loss = epoch_status['total_loss']
wait = 0
else:
wait += 1
if wait > 3:
break
return self
def extract(self, X):
dataset = OutlierDataset(X)
outlier_iter = DataLoader(dataset=dataset,
batch_size=self.batch_size)
outlier_idxs = []
self.model.eval()
with torch.no_grad():
for batch in outlier_iter:
idx, x = batch
inputs = x.to(DEVICE)
outputs = self.model(inputs)
recon_err = self.cal_recon_err(outputs, inputs)
outlier_idx = recon_err > self.threshold
outlier_idx = idx[outlier_idx]
outlier_idxs += outlier_idx.tolist()
return outlier_idxs
def fit_extract(self, X, **fit_params):
return self.fit(X, **fit_params).extract(X)
class OutlierDetector(object):
def __init__(self, input_fname, result_path):
self.get_data(input_fname)
self.input_fname = input_fname
self.result_path = result_path
def get_data(self, input_fname):
data = pd.read_csv(input_fname)
num_idx = data.dtypes[data.dtypes != 'object'].index
num_vars = [data.columns.get_loc(idx) for idx in num_idx]
cat_vars = list(set(range(data.shape[1])) - set(num_vars))
self.data = data
self.num_vars = num_vars
self.cat_vars = cat_vars
def write_json(self, outlier_idxs):
obj = {"result": dict()}
obj["result"]["num_outliers"] = len(outlier_idxs)
obj["result"]["outlier_indices"] = outlier_idxs
result_json_fname = os.path.join(self.result_path, "result.json")
with open(result_json_fname, "w") as json_file:
json.dump(obj, json_file)
def run(self):
if not os.path.isdir(self.result_path):
os.makedirs(self.result_path)
X_noise = self.data.iloc[:, self.num_vars]
X_noise = StandardScaler().fit_transform(X_noise)
detector = Detector()
outlier_idxs = detector.fit_extract(X_noise)
self.write_json(outlier_idxs)
n = self.data.shape[0]
idxs = list(range(n))
clear_idxs = list(set(idxs) - set(outlier_idxs))
result_csv_fname = os.path.join(self.result_path, 'result.csv')
self.data.iloc[clear_idxs, :].to_csv(result_csv_fname, index=False)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input_fname', type=str, default='bank.csv')
parser.add_argument('--result_path', type=str, default='bank_outlier')
args = parser.parse_args()
detector = OutlierDetector(input_fname=args.input_fname, result_path=args.result_path)
detector.run()
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.cuda.manual_seed_all",
"torch.relu",
"torch.no_grad",
"torch.manual_seed",
"torch.nn.functional.mse_loss",
"torch.cuda.is_available",
"torch.tensor",
"torch.utils.data.DataLoader"
] | 1.9.0 | dohnlee/qufa2021 | 5fb42caee09ec228358e49768e32c75e3c0094ce |
1.1 | # -*- coding: UTF-8 -*-
# @Author : Chenyang Wang
# @Email : [email protected]
import os
import logging
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from datetime import datetime
from models.BaseModel import SequentialModel
class TiMiRecLight(SequentialModel):
runner = 'TiMiRunner'
extra_log_args = ['emb_size', 'attn_size', 'K', 'temp', 'add_pos', 'n_layers']
@staticmethod
def parse_model_args(parser):
parser.add_argument('--emb_size', type=int, default=64,
help='Size of embedding vectors.')
parser.add_argument('--attn_size', type=int, default=8,
help='Size of attention vectors.')
parser.add_argument('--K', type=int, default=2,
help='Number of hidden intent.')
parser.add_argument('--add_pos', type=int, default=1,
help='Whether add position embedding.')
parser.add_argument('--temp', type=float, default=1,
help='Temperature in knowledge distillation loss.')
parser.add_argument('--n_layers', type=int, default=1,
help='Number of the projection layer.')
parser.add_argument('--stage', type=int, default=3,
help='Stage of training: 1-pretrain_extractor, 2-pretrain_predictor, 3-joint_finetune.')
return SequentialModel.parse_model_args(parser)
def __init__(self, args, corpus):
self.emb_size = args.emb_size
self.attn_size = args.attn_size
self.K = args.K
self.add_pos = args.add_pos
self.temp = args.temp
self.n_layers = args.n_layers
self.stage = args.stage
self.max_his = args.history_max
super().__init__(args, corpus)
self.extractor_path = '../model/TiMiRecLight/Extractor__{}__{}__emb_size={}__K={}__add_pos={}.pt'\
.format(corpus.dataset, args.random_seed, self.emb_size, self.K, self.add_pos)
self.predictor_path = '../model/TiMiRecLight/Predictor__{}__{}__emb_size={}.pt' \
.format(corpus.dataset, args.random_seed, self.emb_size)
if self.stage == 1:
self.model_path = self.extractor_path
elif self.stage == 2:
self.model_path = self.predictor_path
def _define_params(self):
if self.stage in [1, 3]:
self.interest_extractor = MultiInterestExtractor(
self.K, self.item_num, self.emb_size, self.attn_size, self.max_his, self.add_pos)
if self.stage in [2, 3]:
self.intent_predictor = IntentPredictor(self.item_num, self.emb_size)
if self.stage == 3:
self.proj = nn.Sequential()
for i, _ in enumerate(range(self.n_layers - 1)):
self.proj.add_module('proj_' + str(i), nn.Linear(self.emb_size, self.emb_size))
self.proj.add_module('dropout_' + str(i), nn.Dropout(p=0.5))
self.proj.add_module('relu_' + str(i), nn.ReLU(inplace=True))
self.proj.add_module('proj_final', nn.Linear(self.emb_size, self.K))
def load_model(self, model_path=None):
if model_path is None:
model_path = self.model_path
model_dict = self.state_dict()
state_dict = torch.load(model_path)
exist_state_dict = {k: v for k, v in state_dict.items() if k in model_dict}
model_dict.update(exist_state_dict)
self.load_state_dict(model_dict)
logging.info('Load model from ' + model_path)
def actions_before_train(self):
if self.stage == 3 and os.path.exists(self.extractor_path):
self.load_model(self.extractor_path)
# self.load_model(self.predictor_path)
return
logging.info('Train from scratch!')
@staticmethod
def similarity(a, b):
a = F.normalize(a, dim=-1)
b = F.normalize(b, dim=-1)
return (a * b).sum(dim=-1)
@staticmethod
def js_div(p, q):
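        """Jensen-Shannon divergence between two score vectors; both are softmaxed first and the
        elementwise JS terms are returned (the caller sums over the last dimension)."""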
kl = nn.KLDivLoss(reduction='none')
p, q = p.softmax(-1), q.softmax(-1)
log_mean = ((p + q) / 2).log()
js = (kl(log_mean, p) + kl(log_mean, q)) / 2
return js
def forward(self, feed_dict):
self.check_list = []
i_ids = feed_dict['item_id'] # bsz, -1
history = feed_dict['history_items'] # bsz, max_his + 1
lengths = feed_dict['lengths'] # bsz
batch_size, seq_len = history.shape
out_dict = dict()
if self.stage == 1: # pretrain extractor
interest_vectors = self.interest_extractor(history, lengths) # bsz, K, emb
i_vectors = self.interest_extractor.i_embeddings(i_ids)
if feed_dict['phase'] == 'train':
target_vector = i_vectors[:, 0] # bsz, emb
target_intent = (interest_vectors * target_vector[:, None, :]).sum(-1) # bsz, K
idx_select = target_intent.max(-1)[1] # bsz
user_vector = interest_vectors[torch.arange(batch_size), idx_select, :] # bsz, emb
prediction = (user_vector[:, None, :] * i_vectors).sum(-1)
else:
prediction = (interest_vectors[:, None, :, :] * i_vectors[:, :, None, :]).sum(-1) # bsz, -1, K
prediction = prediction.max(-1)[0] # bsz, -1
elif self.stage == 2: # pretrain predictor
his_vector = self.intent_predictor(history, lengths)
i_vectors = self.intent_predictor.i_embeddings(i_ids)
prediction = (his_vector[:, None, :] * i_vectors).sum(-1)
else: # finetune
interest_vectors = self.interest_extractor(history, lengths) # bsz, K, emb
i_vectors = self.interest_extractor.i_embeddings(i_ids)
his_vector = self.intent_predictor(history, lengths) # bsz, K
# pred_intent = self.similarity(interest_vectors.detach(), his_vector.unsqueeze(1)) # bsz, K
pred_intent = self.proj(his_vector) # bsz, K
user_vector = (interest_vectors * pred_intent.softmax(-1)[:, :, None]).sum(-2) # bsz, emb
if feed_dict['phase'] == 'train':
target_vector = i_vectors[:, 0] # bsz, emb
target_intent = self.similarity(interest_vectors, target_vector.unsqueeze(1)) # bsz, K
# idx_select = pred_intent.max(-1)[1] # bsz
# user_vector = interest_vectors[torch.arange(batch_size), idx_select, :] # bsz, emb
out_dict['pred_intent'] = pred_intent
out_dict['target_intent'] = target_intent
self.check_list.append(('intent', pred_intent.softmax(-1)))
self.check_list.append(('target', target_intent.softmax(-1)))
prediction = (user_vector[:, None, :] * i_vectors).sum(-1)
out_dict['prediction'] = prediction.view(batch_size, -1)
# For JS divergence analysis
if self.stage != 2 and feed_dict['phase'] == 'test':
target_vector = i_vectors[:, 0] # bsz, emb
target_intent = self.similarity(interest_vectors, target_vector.unsqueeze(1)) # bsz, K
idx = torch.from_numpy(np.arange(batch_size)).to(self.device)
rec_vector = i_vectors[idx, prediction.max(-1)[1]]
rec_intent = self.similarity(interest_vectors, rec_vector.unsqueeze(1)) # bsz, K
out_dict['js'] = self.js_div(target_intent, rec_intent).sum(-1)
            out_dict['dis'] = (interest_vectors[:, 0, :] - interest_vectors[:, 0, :]).pow(2).sum(-1)  # zero tensor of shape [bsz], accumulator for pairwise distances
for i in range(self.K - 1):
for j in range(i + 1, self.K):
out_dict['dis'] += (interest_vectors[:, i, :] - interest_vectors[:, j, :]).pow(2).sum(-1)
out_dict['dis'] /= (self.K * (self.K - 1) / 2)
return out_dict
def loss(self, out_dict: dict):
if self.stage in [1, 2]: # pretrain
loss = super().loss(out_dict)
else: # finetune
pred_intent = out_dict['pred_intent'] / self.temp
target_intent = out_dict['target_intent'].detach() / self.temp
# target_intent = out_dict['target_intent'] / self.temp
kl_criterion = nn.KLDivLoss(reduction='batchmean')
loss = kl_criterion(F.log_softmax(pred_intent, dim=1), F.softmax(target_intent, dim=1))
loss = super().loss(out_dict) + self.temp * self.temp * loss
# loss = super().loss(out_dict)
return loss
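# Illustrative sketch (not used by the model above; names and shapes are assumptions for the
# example only): the finetune loss combines the recommendation loss with a temperature-scaled
# KL term and multiplies the KL term by temp^2 so its gradient magnitude stays comparable
# across temperatures, which is standard knowledge-distillation practice. A minimal
# self-contained version of that KL term:
def _kd_loss_sketch(pred_intent, target_intent, temp=1.0):
    import torch.nn.functional as F
    from torch import nn
    kl_criterion = nn.KLDivLoss(reduction='batchmean')
    soft_pred = F.log_softmax(pred_intent / temp, dim=1)
    soft_target = F.softmax(target_intent.detach() / temp, dim=1)
    return temp * temp * kl_criterion(soft_pred, soft_target)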
class MultiInterestExtractor(nn.Module):
def __init__(self, k, item_num, emb_size, attn_size, max_his, add_pos):
super(MultiInterestExtractor, self).__init__()
self.max_his = max_his
self.add_pos = add_pos
self.i_embeddings = nn.Embedding(item_num, emb_size)
if self.add_pos:
self.p_embeddings = nn.Embedding(max_his + 1, emb_size)
self.W1 = nn.Linear(emb_size, attn_size)
self.W2 = nn.Linear(attn_size, k)
def forward(self, history, lengths):
batch_size, seq_len = history.shape
valid_his = (history > 0).long()
his_vectors = self.i_embeddings(history)
if self.add_pos:
len_range = torch.from_numpy(np.arange(self.max_his)).to(history.device)
position = (lengths[:, None] - len_range[None, :seq_len]) * valid_his
pos_vectors = self.p_embeddings(position)
his_pos_vectors = his_vectors + pos_vectors
else:
his_pos_vectors = his_vectors
# Multi-Interest Extraction
attn_score = self.W2(self.W1(his_pos_vectors).tanh()) # bsz, his_max, K
attn_score = attn_score.masked_fill(valid_his.unsqueeze(-1) == 0, -np.inf)
attn_score = attn_score.transpose(-1, -2) # bsz, K, his_max
attn_score = (attn_score - attn_score.max()).softmax(dim=-1)
attn_score = attn_score.masked_fill(torch.isnan(attn_score), 0)
interest_vectors = (his_vectors[:, None, :, :] * attn_score[:, :, :, None]).sum(-2) # bsz, K, emb
return interest_vectors
class IntentPredictor(nn.Module):
def __init__(self, item_num, emb_size):
super(IntentPredictor, self).__init__()
self.i_embeddings = nn.Embedding(item_num + 1, emb_size)
self.rnn = nn.GRU(input_size=emb_size, hidden_size=emb_size, batch_first=True)
def forward(self, history, lengths):
his_vectors = self.i_embeddings(history)
sort_lengths, sort_idx = torch.topk(lengths, k=len(lengths))
sort_seq = his_vectors.index_select(dim=0, index=sort_idx)
seq_packed = torch.nn.utils.rnn.pack_padded_sequence(sort_seq, sort_lengths.cpu(), batch_first=True)
output, hidden = self.rnn(seq_packed, None)
unsort_idx = torch.topk(sort_idx, k=len(lengths), largest=False)[1]
his_vector = hidden[-1].index_select(dim=0, index=unsort_idx)
return his_vector
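# Illustrative note (not part of the model; the lengths used are assumptions for the example
# only): pack_padded_sequence expects sequences sorted by decreasing length in older PyTorch,
# so the forward pass above sorts by length, runs the GRU, and restores the original batch
# order with the inverse permutation obtained from a second topk. A minimal stand-alone check:
def _sort_unsort_sketch():
    import torch
    lengths = torch.tensor([3, 7, 5])
    sort_lengths, sort_idx = torch.topk(lengths, k=len(lengths))         # [7, 5, 3], idx [1, 2, 0]
    unsort_idx = torch.topk(sort_idx, k=len(lengths), largest=False)[1]  # inverse permutation [2, 0, 1]
    assert torch.equal(sort_lengths[unsort_idx], lengths)                # original order restored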
| [
"torch.nn.Linear",
"torch.nn.functional.normalize",
"torch.nn.Dropout",
"torch.nn.GRU",
"torch.isnan",
"torch.nn.functional.softmax",
"torch.nn.Sequential",
"torch.arange",
"torch.nn.functional.log_softmax",
"torch.nn.ReLU",
"torch.nn.KLDivLoss",
"torch.load",
"torch.nn.Embedding"
] | 1.1.0 | Andrewnar/ReChorus | 55ceb37beb7b9967a4d18d9899075a8d88d11ddb |
1.6 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@file : model.py
@author: zijun
@contact : [email protected]
@date : 2020/11/17 14:57
@version: 1.0
@desc :
"""
import torch
from torch import nn
from transformers import AutoModel, AutoConfig
from datasets.collate_functions import collate_to_max_length
class ExplainableModel(nn.Module):
def __init__(self, bert_dir):
super().__init__()
self.bert_config = AutoConfig.from_pretrained(bert_dir, output_hidden_states=False, num_labels=3)
self.intermediate = AutoModel.from_pretrained(bert_dir)
self.span_info_collect = SICModel(self.bert_config.hidden_size)
self.interpretation = InterpretationModel(self.bert_config.hidden_size)
self.output = nn.Linear(self.bert_config.hidden_size, self.bert_config.num_labels)
def forward(self, input_ids, start_indexs, end_indexs, span_masks):
# generate mask
attention_mask = (input_ids != 1).long()
# intermediate layer
hidden_states = self.intermediate(input_ids, attention_mask=attention_mask).last_hidden_state # output.shape = (bs, length, hidden_size)
# span info collecting layer(SIC)
h_ij = self.span_info_collect(hidden_states, start_indexs, end_indexs)
# interpretation layer
H, a_ij = self.interpretation(h_ij, span_masks)
# output layer
out = self.output(H)
return out, a_ij
class SICModel(nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.hidden_size = hidden_size
self.W_1 = nn.Linear(hidden_size, hidden_size)
self.W_2 = nn.Linear(hidden_size, hidden_size)
self.W_3 = nn.Linear(hidden_size, hidden_size)
self.W_4 = nn.Linear(hidden_size, hidden_size)
def forward(self, hidden_states, start_indexs, end_indexs):
W1_h = self.W_1(hidden_states) # (bs, length, hidden_size)
W2_h = self.W_2(hidden_states)
W3_h = self.W_3(hidden_states)
W4_h = self.W_4(hidden_states)
W1_hi_emb = torch.index_select(W1_h, 1, start_indexs) # (bs, span_num, hidden_size)
W2_hj_emb = torch.index_select(W2_h, 1, end_indexs)
W3_hi_start_emb = torch.index_select(W3_h, 1, start_indexs)
W3_hi_end_emb = torch.index_select(W3_h, 1, end_indexs)
W4_hj_start_emb = torch.index_select(W4_h, 1, start_indexs)
W4_hj_end_emb = torch.index_select(W4_h, 1, end_indexs)
# [w1*hi, w2*hj, w3(hi-hj), w4(hi⊗hj)]
span = W1_hi_emb + W2_hj_emb + (W3_hi_start_emb - W3_hi_end_emb) + torch.mul(W4_hj_start_emb, W4_hj_end_emb)
h_ij = torch.tanh(span)
return h_ij
class InterpretationModel(nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.h_t = nn.Linear(hidden_size, 1)
def forward(self, h_ij, span_masks):
        o_ij = self.h_t(h_ij).squeeze(-1)  # (bs, span_num)
# mask illegal span
o_ij = o_ij - span_masks
# normalize all a_ij, a_ij sum = 1
a_ij = nn.functional.softmax(o_ij, dim=1)
# weight average span representation to get H
H = (a_ij.unsqueeze(-1) * h_ij).sum(dim=1) # (bs, hidden_size)
return H, a_ij
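# Illustrative note (not part of the model; the concrete numbers are assumptions for the
# example only): span_masks is expected to hold a large positive value for illegal spans and 0
# for legal ones, so subtracting it before the softmax above pushes the attention weights of
# illegal spans towards zero. A minimal numeric sketch:
def _span_mask_sketch():
    import torch
    o_ij = torch.tensor([[2.0, 1.0, 0.5]])        # raw span scores
    span_masks = torch.tensor([[0.0, 0.0, 1e6]])  # the last span is illegal
    a_ij = torch.nn.functional.softmax(o_ij - span_masks, dim=1)
    return a_ij                                   # roughly [[0.73, 0.27, 0.00]]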
| [
"torch.nn.Linear",
"torch.mul",
"torch.tanh",
"torch.nn.functional.softmax",
"torch.index_select"
] | 1.6.0 | kco4776/Self_Explaining_Structures_Improve_NLP_Models | dbc2d852cbe8bffd22b18425e9a4bac00d557eeb |
1.6 | print("importing")
from datasets import load_dataset
from datasets import load_metric
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from transformers import TrainingArguments, DefaultFlowCallback, PrinterCallback
from transformers import Trainer
import torch
from torch import nn
import numpy as np
import pickle
from sklearn.preprocessing import StandardScaler
import random
import json
sep_token = "[SEP]" # FORDOR maybe many special tokens
pretrained_model_name = "roberta-base" # 'bert-base-cased'
class my_Bert(nn.Module):
def __init__(self, bert):
super().__init__()
self.bert = bert
def forward(self,input_ids,attention_mask=None,labels=None,**kwargs):
res = self.bert.forward(input_ids,attention_mask,labels=labels,**kwargs)
print(f"FORDOR-input_ids {input_ids}")
print(f"FORDOR-inputss {tokenizer.decode(input_ids[0])}")
print(f"FORDOR-inputss {tokenizer.decode(input_ids[1])}")
print(f"FORDOR-labels {labels}")
print(f"FORDOR-res {res}")
return res
print("starting load")
# for i in range(len(dataset["train_eli5"])):
# print(f'train= {dataset["train_eli5"][i]["answers"]}')
# print(f'valid= {dataset["validation_eli5"][i]["answers"]}')
# print(f'test= {dataset["test_eli5"][i]["answers"]}')
class ELI5MetricDataset(torch.utils.data.Dataset):
def __init__(self, encodings, labels):
self.encodings = encodings
self.labels = labels
def __getitem__(self, idx):
item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
item['labels'] = torch.tensor(self.labels[idx])
return item
def __len__(self):
return len(self.labels)
def tokenize_function(examples):
return tokenizer(examples["text"], padding="max_length", truncation=True)
def changeArr(input1):
# Copy input array into newArray
newArray = input1.copy()
# Sort newArray[] in ascending order
newArray.sort()
# Dictionary to store the rank of
# the array element
ranks = {}
rank = 1
for index in range(len(newArray)):
        element = newArray[index]
# Update rank of element
if element not in ranks:
ranks[element] = rank
rank += 1
# Assign ranks to elements
for index in range(len(input1)):
        element = input1[index]
        input1[index] = float(ranks[element])
my_dataset = {}
if False:# try:
with open("my_dataset.pickle", "rb" ) as f:
my_dataset = pickle.load(f)
else: # except IOError:
print("could not load my_dataset - preprocessing")
raw_datasets = load_dataset("eli5")
tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name)
def preprocess_data(split_name):
with open(f'{split_name}.json', 'a') as the_file:
inputs = []
labels = []
cnt = 0
for example in raw_datasets[split_name]:
question = example["title"]+ example["selftext"] #FORDOR add special sep token?
for i in range (len (example["answers"]["a_id"])):
answer = example["answers"]["text"][i]
# question = question.replace('"','\\"')
# answer = answer.replace('"','\\"')
the_file.write(f'{{"text": {json.dumps(question)}, "summary": {json.dumps(answer)} }}\n')
# inputs.append(question + sep_token + answer)
# print (f'FORDOR float - {float(example["answers"]["score"][i])} {example["answers"]["score"][i]}')
# labels.append(float(example["answers"]["score"][i]))
cnt = cnt+1
if cnt > 200000:
break
# tokenized_datasets = raw_datasets.map(tokenize_function, batched=True)
#shuffle data
# c = list(zip(inputs, labels))
# random.seed(42)
# random.shuffle(c)
# inputs, labels = zip(*c)
# inputs = list(inputs)
# labels = list(labels)
# encodings = tokenizer(inputs, padding="max_length", truncation=True)
# encodings2 = tokenizer(inputs, padding="max_length", truncation=False)
# for i in range(len(encodings)):
# if len(encodings[i]) != len( encodings2[i]):
# print (print(f"encoding and length {encodings[i]}, {len(encodings[i])} no truncation = {encodings2[i]}, {len(encodings2[i])}"))
#
# tensor_labels = torch.as_tensor(labels).reshape(-1,1)
# scaler = StandardScaler()
# scaler.fit(tensor_labels)
# scaled_labels = scaler.transform(tensor_labels).astype(np.float32)
# changeArr(labels)
# my_dataset[split_name] = ELI5MetricDataset(encodings, scaled_labels)
# print (f"FORDOR lens {len(encodings)}=={len(labels)}")
# assert len(encodings) == len(labels)
preprocess_data("train_eli5")
preprocess_data("validation_eli5")
# pickle.dump( my_dataset, open( "my_dataset.pickle", "wb" ) )
# metric = load_metric("spearmanr")
# def compute_metrics(eval_pred):
# logits, labels = eval_pred
# print(f'logits- {max(logits)}, {min(logits)}')
# print(f'labels- {max(labels)}, {min(labels)}')
# return metric.compute(predictions=logits, references=labels)
# model = AutoModelForSequenceClassification.from_pretrained(pretrained_model_name, num_labels=1)
# # freezing bert parameters leaving only regression layer
# # for param in model.bert.parameters():
# # param.requires_grad = False
# # model = my_Bert(model)
# # print (f"FORDOR model = {str(model)}")
# # print (f'FORDOR debug {raw_datasets["train_eli5"][0]["answers"]} =:= {model(input_ids=my_dataset["train_eli5"][0]["input_ids"].unsqueeze(0), attention_mask=my_dataset["train_eli5"][0]["attention_mask"].unsqueeze(0), token_type_ids=my_dataset["train_eli5"][0]["token_type_ids"].unsqueeze(0))}')
# training_args = TrainingArguments("test_trainer", evaluation_strategy="steps", eval_steps=10000, save_steps=10000, per_device_train_batch_size=8, per_device_eval_batch_size=8)
# trainer = Trainer(model=model, args=training_args, train_dataset=my_dataset["train_eli5"], eval_dataset=my_dataset["validation_eli5"], compute_metrics=compute_metrics,
# callbacks = [
# DefaultFlowCallback(),
# PrinterCallback()
# ],
# )
# #, max_steps=3000
# trainer.train()
# # model.eval()
# # print (f'FORDOR2 debug {raw_datasets["train_eli5"][0]["answers"]} =:= {model(input_ids=my_dataset["train_eli5"][0]["input_ids"].unsqueeze(0).cuda(), attention_mask=my_dataset["train_eli5"][0]["attention_mask"].unsqueeze(0).cuda(), token_type_ids=my_dataset["train_eli5"][0]["token_type_ids"].unsqueeze(0).cuda())}')
# # print (f'FORDOR3 debug {raw_datasets["train_eli5"][0]["answers"]} =:= {model(input_ids=my_dataset["train_eli5"][1]["input_ids"].unsqueeze(0).cuda(), attention_mask=my_dataset["train_eli5"][1]["attention_mask"].unsqueeze(0).cuda(), token_type_ids=my_dataset["train_eli5"][1]["token_type_ids"].unsqueeze(0).cuda())}')
# # print (f'FORDOR4 debug {raw_datasets["train_eli5"][1]["answers"]} =:= {model(input_ids=my_dataset["train_eli5"][4]["input_ids"].unsqueeze(0).cuda(), attention_mask=my_dataset["train_eli5"][4]["attention_mask"].unsqueeze(0).cuda(), token_type_ids=my_dataset["train_eli5"][4]["token_type_ids"].unsqueeze(0).cuda())}')
# print ("evaluation starting")
# print (trainer.evaluate())
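# Illustrative helper (not called above; the field names follow the lines written by
# preprocess_data): each output line is a JSON object with a "text" field (question title +
# selftext) and a "summary" field (one answer), so the generated files can be read back like this:
def preview_jsonl(path, n=3):
    with open(path) as f:
        for i, line in enumerate(f):
            if i >= n:
                break
            pair = json.loads(line)
            print(pair["text"][:80], "->", pair["summary"][:80])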
| [
"torch.tensor"
] | 1.6.0 | Dorcoh4/BARTScore | e24fd22b80a01ef142ce43e24ec585f1ee8c1ff2 |
1.0 | import argparse
import numpy as np
import json
import torch
from torchvision import datasets, transforms
from _params import add_common_params, add_decentralized_params
from _train_utils import test, plot_learning_curve
from _node import Node
from _byzantine_node import ByzantineNode
from _data_utils import default_transform, MNISTSlice
from _logic import *
from _krum import krum, _distance
from _average import get_average_gradients, get_std_gradients
from _attack import setup_lp_norm_attack, byzantine_committee_vote
from _trimmed_mean import trimmed_mean
def main():
parser = argparse.ArgumentParser(description='PyTorch MNIST Decentralized Training')
add_common_params(parser)
add_decentralized_params(parser)
args = parser.parse_args()
use_multiprocess = not(args.no_multiprocess)
trainset_full = datasets.MNIST('../data', train=True, download=True, transform=default_transform)
torch.manual_seed(args.seed)
use_cuda = torch.cuda.is_available()
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False, transform=default_transform),
batch_size=args.test_batch_size,
shuffle=True,
**kwargs)
train_labels = trainset_full.train_labels.numpy()
train_data = trainset_full.train_data.numpy()
train_label_indices = {}
# distribute data across nodes
print('setting up the simulation:: creating {} distributed nodes...'.format(args.nodes))
for digit in range(10):
train_label_indices[digit] = np.where(train_labels == digit)[0]
n_byzantine = int(args.nodes * args.byzantine)
expected_n_byzantine_committee = int(np.ceil(args.committee_size * args.expected_byzantine))
expected_n_byzantine_participants = int(np.ceil(args.participants_size * args.expected_byzantine))
nodes = []
byzantine_idx = []
for node_idx in range(args.nodes):
node_indices = []
for digit in range(10):
node_indices.extend(np.random.choice(
train_label_indices[digit],
size=int(args.sample_size / 10))) # sample randomly from each label
node_data = torch.from_numpy(train_data[node_indices])
node_labels = torch.from_numpy(train_labels[node_indices])
node_trainset = MNISTSlice(
root='../data',
data=node_data,
labels=node_labels,
train=True,
transform=default_transform,
download=True)
if node_idx < n_byzantine: # node was chosen as byzantine node
byzantine_idx.append(node_idx)
node = ByzantineNode.create(mode=args.byzantine_mode)(
node_idx,
node_trainset,
batch_size=args.batch_size,
learning_rate=args.lr,
momentum=args.momentum,
log_interval=args.log_interval,
mode=args.byzantine_mode)
else:
node = Node(
node_idx,
node_trainset,
batch_size=args.batch_size,
learning_rate=args.lr,
momentum=args.momentum,
log_interval=args.log_interval)
nodes.append(node)
nodes = np.array(nodes)
honest_nodes = [n for n in nodes if n.id not in byzantine_idx]
print('Created {} Byzantine nodes: {}'.format(len(byzantine_idx), byzantine_idx))
print('Done.')
# decentralized training
print('starting decentralized training...')
print(' ==> expecting {} byzantines in each committee, and {} byzantines in each participants group.'.format(
expected_n_byzantine_committee, expected_n_byzantine_participants))
consensus_w = honest_nodes[0].get_weights()
align_all_nodes_to_consensus(nodes, consensus_w)
learning_curve = []
test_accuracy = []
for i in range(args.epochs):
print('epoch:: {} out of {}'.format(i + 1, args.epochs))
while True:
participant_ids = select_participants(
n_nodes=args.nodes,
n_participants=args.participants_size)
committe_ids = select_committee(
n_nodes=args.nodes,
n_committee=args.committee_size,
exclude=participant_ids)
byzantine_participants_ids = set(participant_ids).intersection(set(byzantine_idx))
print('{} byzantine participants selected...'.format(len(byzantine_participants_ids)))
byzantine_committee_ids = set(committe_ids).intersection(set(byzantine_idx))
print('{} byzantine committe selected...'.format(len(byzantine_committee_ids)))
if (len(byzantine_participants_ids) < args.participants_size / 2) and (len(byzantine_committee_ids) < args.committee_size / 2):
break
participants = nodes[participant_ids]
committee = nodes[committe_ids]
print('training all nodes...')
all_train_loss = run_all(participants, multiprocess=use_multiprocess)
avg_train_loss = np.mean([loss for id_, loss in all_train_loss if id_ not in byzantine_idx])
# setting up the Lp-norm attack (if there are byzantines)
if args.byzantine_mode == 'lp-norm':
honest_participants = [n for n in participants if n.id not in byzantine_idx]
mu = get_average_gradients(honest_participants)
std = get_std_gradients(honest_participants)
gamma = setup_lp_norm_attack(participants, byzantine_idx, mu, std, consensus_w, f=expected_n_byzantine_participants)
print('Chosen Lp-norm attack gamma: {}'.format(gamma))
if args.aggregator == 'union-consensus':
print('collecting weights from participants...')
w_array = collect_participants_weights(participants)
print('collecting votes from committee...')
honest_committee = [n for n in committee if n.id not in byzantine_idx]
byzantine_committee = [n for n in committee if n.id in byzantine_idx]
votes = collect_committee_votes(honest_committee, w_array, f=expected_n_byzantine_participants, multiprocess=True)
byzantine_vote = byzantine_committee_vote(participants, byzantine_idx, f=expected_n_byzantine_participants)
[votes.update({n.id: byzantine_vote}) for n in byzantine_committee]
print("Votes:", dict([(k, participant_ids[v]) for k, v in votes.items()]))
union_consensus, n_unique_recipients = reach_union_consensus(votes, f=expected_n_byzantine_committee)
union_consensus_ids = participant_ids[union_consensus]
            print('reached union consensus of size {}, with {} unique recipients'.format(
len(union_consensus),
n_unique_recipients))
byzantine_consensus_ids = set(union_consensus_ids).intersection(byzantine_participants_ids)
print('Consensus: {}, #Byzantine nodes inside: {} --> {}'.format(
union_consensus_ids, len(byzantine_consensus_ids), byzantine_consensus_ids))
consensus_w = get_average_union_consensus(w_array, union_consensus)
align_all_nodes_to_consensus(nodes, consensus_w)
learning_curve.append({
'train_loss': avg_train_loss,
'union_size': len(union_consensus),
'n_unique_recipients': n_unique_recipients,
'n_byzantine_participants': len(byzantine_participants_ids),
'n_byzantine_committee': len(byzantine_committee_ids),
'n_byzantine_consensus': len(byzantine_consensus_ids),
})
elif args.aggregator == 'krum':
print('collecting gradients from participants and running krum...')
krum_node_idx, krum_scores = krum(participants, f=expected_n_byzantine_participants)
selected_node = participants[krum_node_idx]
is_byzantine_selected = int(selected_node.id in byzantine_participants_ids)
print('Selected node by krum: {}, is byzantine: {}'.format(selected_node.id, is_byzantine_selected))
print('Krum selected score: {}'.format(krum_scores[krum_node_idx]))
consensus_w = selected_node.get_weights()
align_all_nodes_to_consensus(nodes, consensus_w)
learning_curve.append({
'train_loss': avg_train_loss,
'selected_node': selected_node.id,
'is_byzantine_selected': is_byzantine_selected,
})
elif args.aggregator == 'trimmed-mean':
print('collecting gradients from participants and running trimmed mean...')
trimmed_mean_grads = trimmed_mean(participants, f=expected_n_byzantine_participants)
# simulate the step take by the trimmed mean gradient
honest_participants = [n for n in participants if n.id not in byzantine_idx]
proxy_node = honest_participants[0]
proxy_node.set_weights(consensus_w)
proxy_node.set_gradients(trimmed_mean_grads)
proxy_node.take_step()
consensus_w = proxy_node.get_weights()
align_all_nodes_to_consensus(nodes, consensus_w)
learning_curve.append({
'train_loss': avg_train_loss
})
else: # average
print('collecting gradients from participants and running average...')
average_grads = get_average_gradients(participants)
# simulate the step take by the average gradient
honest_participants = [n for n in participants if n.id not in byzantine_idx]
proxy_node = honest_participants[0]
proxy_node.set_weights(consensus_w)
proxy_node.set_gradients(average_grads)
proxy_node.take_step()
consensus_w = proxy_node.get_weights()
align_all_nodes_to_consensus(nodes, consensus_w)
learning_curve.append({
'train_loss': avg_train_loss
})
if args.byzantine_mode == 'lp-norm':
learning_curve[-1]['gamma'] = gamma
if i % 1 == 0:
accuracy, popular_misses = test(
args, participants[0]._model, participants[0]._device, test_loader)
test_accuracy.append({'accuracy': accuracy, 'popular_misses': popular_misses})
with open('raw_learning_curve__{}.json'.format(args.aggregator), 'w') as f_raw:
json.dump(
{
'setting': vars(args),
'train': learning_curve,
'evaluation': test_accuracy
},
f_raw
)
if __name__ == '__main__':
main()
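# Illustrative sketch (hypothetical, not the implementation imported from _trimmed_mean):
# a coordinate-wise trimmed mean drops the f largest and f smallest values per coordinate
# before averaging, which is the basic idea behind the 'trimmed-mean' aggregator used above
# (it needs more than 2*f gradient vectors to keep anything).
def _trimmed_mean_sketch(gradients, f):
    stacked = torch.stack(gradients)             # [n_nodes, n_params]
    sorted_vals, _ = torch.sort(stacked, dim=0)  # sort each coordinate across nodes
    kept = sorted_vals[f:stacked.shape[0] - f]   # drop f smallest and f largest per coordinate
    return kept.mean(dim=0)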
| [
"torch.from_numpy",
"torch.manual_seed",
"torch.cuda.is_available"
] | 1.0.1 | aprilpear/holdout-sgd | fa81bce57fb98aef262536fb2d7a26567d3143f7 |
1.1 | import logging
import time
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%Y/%m/%d %H:%M:%S',
level=logging.INFO,
)
logger = logging.getLogger("Main")
import os,random
import numpy as np
import torch
from utils_glue import output_modes, processors
from pytorch_pretrained_bert.my_modeling import BertConfig
from pytorch_pretrained_bert import BertTokenizer
from optimization import BERTAdam
import config
from utils import divide_parameters, load_and_cache_examples
from modeling import BertForGLUESimple,BertForGLUESimpleAdaptor
from textbrewer import DistillationConfig, TrainingConfig, GeneralDistiller
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler, DistributedSampler
from tqdm import tqdm
from utils_glue import compute_metrics
from functools import partial
def args_check(args):
if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
        logger.warning("Output directory ({}) already exists and is not empty.".format(args.output_dir))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
if not args.do_train and not args.do_predict:
raise ValueError("At least one of `do_train` or `do_predict` must be True.")
if args.local_rank == -1:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count() if torch.cuda.is_available() else 0
else:
device = torch.device("cuda", args.local_rank)
n_gpu = 1
torch.distributed.init_process_group(backend='nccl')
logger.info("device %s n_gpu %d distributed training %r", device, n_gpu, bool(args.local_rank != -1))
args.n_gpu = n_gpu
args.device = device
return device, n_gpu
def predict(model,eval_datasets,step,args):
eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
eval_output_dir = args.output_dir
results = {}
for eval_task,eval_dataset in zip(eval_task_names, eval_datasets):
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
logger.info("Predicting...")
logger.info("***** Running predictions *****")
logger.info(" task name = %s", eval_task)
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.predict_batch_size)
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.predict_batch_size)
model.eval()
        # start time
start_time = time.time()
pred_logits = []
label_ids = []
for batch in tqdm(eval_dataloader, desc="Evaluating", disable=None):
input_ids, input_mask, segment_ids, labels = batch
input_ids = input_ids.to(args.device)
input_mask = input_mask.to(args.device)
segment_ids = segment_ids.to(args.device)
with torch.no_grad():
logits = model(input_ids, input_mask, segment_ids)
cpu_logits = logits.detach().cpu()
for i in range(len(cpu_logits)):
pred_logits.append(cpu_logits[i].numpy())
label_ids.append(labels[i])
pred_logits = np.array(pred_logits)
label_ids = np.array(label_ids)
if args.output_mode == "classification":
preds = np.argmax(pred_logits, axis=1)
else: # args.output_mode == "regression":
preds = np.squeeze(pred_logits)
result = compute_metrics(eval_task, preds, label_ids)
        logger.info(f"task: {eval_task}")
logger.info(f"result: {result}")
results.update(result)
cost_time = time.time() - start_time
        logger.info(f"--- Evaluating {len(eval_dataset)} examples took {cost_time} seconds in total, {cost_time/len(eval_dataset)} seconds per example ---")
output_eval_file = os.path.join(eval_output_dir, "eval_results-%s.txt" % eval_task)
with open(output_eval_file, "a") as writer:
logger.info("***** Eval results {} task {} *****".format(step, eval_task))
writer.write("step: %d ****\n " % step)
for key in sorted(results.keys()):
logger.info("%s = %s", key, str(results[key]))
writer.write("%s = %s\n" % (key, str(results[key])))
model.train()
return results
def main():
#parse arguments
config.parse()
args = config.args
for k,v in vars(args).items():
logger.info(f"{k}:{v}")
#set seeds
torch.manual_seed(args.random_seed)
torch.cuda.manual_seed_all(args.random_seed)
np.random.seed(args.random_seed)
random.seed(args.random_seed)
#arguments check
device, n_gpu = args_check(args)
os.makedirs(args.output_dir, exist_ok=True)
forward_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)
args.forward_batch_size = forward_batch_size
#load bert config
bert_config_T = BertConfig.from_json_file(args.bert_config_file_T)
bert_config_S = BertConfig.from_json_file(args.bert_config_file_S)
assert args.max_seq_length <= bert_config_T.max_position_embeddings
assert args.max_seq_length <= bert_config_S.max_position_embeddings
#Prepare GLUE task
processor = processors[args.task_name]()
args.output_mode = output_modes[args.task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
#read data
train_dataset = None
eval_datasets = None
num_train_steps = None
tokenizer = BertTokenizer(vocab_file=args.vocab_file, do_lower_case=args.do_lower_case)
    # Load the datasets
if args.do_train:
train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)
if args.aux_task_name:
aux_train_dataset = load_and_cache_examples(args, args.aux_task_name, tokenizer, evaluate=False, is_aux=True)
train_dataset = torch.utils.data.ConcatDataset([train_dataset, aux_train_dataset])
num_train_steps = int(len(train_dataset)/args.train_batch_size) * args.num_train_epochs
if args.do_predict:
eval_datasets = []
eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
for eval_task in eval_task_names:
eval_datasets.append(load_and_cache_examples(args, eval_task, tokenizer, evaluate=True))
    logger.info("Datasets loaded successfully")
    # Build the models: load both the teacher and the student model
model_T = BertForGLUESimple(bert_config_T, num_labels=num_labels,args=args)
model_S = BertForGLUESimple(bert_config_S, num_labels=num_labels,args=args)
    # Load the teacher model parameters
if args.tuned_checkpoint_T is not None:
state_dict_T = torch.load(args.tuned_checkpoint_T, map_location='cpu')
model_T.load_state_dict(state_dict_T)
model_T.eval()
else:
assert args.do_predict is True
#Load student
if args.load_model_type=='bert':
assert args.init_checkpoint_S is not None
state_dict_S = torch.load(args.init_checkpoint_S, map_location='cpu')
if args.only_load_embedding:
state_weight = {k[5:]:v for k,v in state_dict_S.items() if k.startswith('bert.embeddings')}
missing_keys,_ = model_S.bert.load_state_dict(state_weight,strict=False)
logger.info(f"Missing keys {list(missing_keys)}")
else:
state_weight = {k[5:]:v for k,v in state_dict_S.items() if k.startswith('bert.')}
missing_keys,_ = model_S.bert.load_state_dict(state_weight,strict=False)
assert len(missing_keys)==0
logger.info("Model loaded")
elif args.load_model_type=='all':
assert args.tuned_checkpoint_S is not None
state_dict_S = torch.load(args.tuned_checkpoint_S,map_location='cpu')
model_S.load_state_dict(state_dict_S)
logger.info("Model loaded")
else:
        logger.info("The student model has no parameters to load; its parameters are randomly initialized.")
model_T.to(device)
model_S.to(device)
if args.local_rank != -1 or n_gpu > 1:
if args.local_rank != -1:
raise NotImplementedError
elif n_gpu > 1:
model_T = torch.nn.DataParallel(model_T) #,output_device=n_gpu-1)
model_S = torch.nn.DataParallel(model_S) #,output_device=n_gpu-1)
if args.do_train:
#parameters
params = list(model_S.named_parameters())
all_trainable_params = divide_parameters(params, lr=args.learning_rate)
logger.info("Length of all_trainable_params: %d", len(all_trainable_params))
        # Optimizer configuration
optimizer = BERTAdam(all_trainable_params,lr=args.learning_rate,
warmup=args.warmup_proportion,t_total=num_train_steps,schedule=args.schedule,
s_opt1=args.s_opt1, s_opt2=args.s_opt2, s_opt3=args.s_opt3)
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Forward batch size = %d", forward_batch_size)
logger.info(" Num backward steps = %d", num_train_steps)
########### DISTILLATION ###########
train_config = TrainingConfig(
gradient_accumulation_steps = args.gradient_accumulation_steps,
ckpt_frequency = args.ckpt_frequency,
log_dir = args.output_dir,
output_dir = args.output_dir,
device = args.device)
        # 'matches' defines a set of predefined intermediate-layer matching configurations
from matches import matches
intermediate_matches = None
if isinstance(args.matches,(list,tuple)):
intermediate_matches = []
for match in args.matches:
intermediate_matches += matches[match]
        logger.info(f"Intermediate-layer match info: {intermediate_matches}")
distill_config = DistillationConfig(
temperature = args.temperature,
intermediate_matches=intermediate_matches)
        logger.info(f"Training config: {train_config}")
        logger.info(f"Distillation config: {distill_config}")
adaptor_T = partial(BertForGLUESimpleAdaptor, no_logits=args.no_logits, no_mask = args.no_inputs_mask)
adaptor_S = partial(BertForGLUESimpleAdaptor, no_logits=args.no_logits, no_mask = args.no_inputs_mask)
        # A general distiller that supports intermediate-state matching
distiller = GeneralDistiller(train_config = train_config,
distill_config = distill_config,
model_T = model_T, model_S = model_S,
adaptor_T = adaptor_T,
adaptor_S = adaptor_S)
if args.local_rank == -1:
train_sampler = RandomSampler(train_dataset)
else:
raise NotImplementedError
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.forward_batch_size,drop_last=True)
callback_func = partial(predict, eval_datasets=eval_datasets, args=args)
with distiller:
distiller.train(optimizer, scheduler=None, dataloader=train_dataloader,
num_epochs=args.num_train_epochs, callback=callback_func)
if not args.do_train and args.do_predict:
res = predict(model_S,eval_datasets,step=0,args=args)
print (res)
if __name__ == "__main__":
main()
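# Illustrative sketch (hypothetical helper, not used by main above; the model/optimizer/
# micro_batches arguments are assumptions for the example only): dividing the loss by the
# number of accumulation steps and stepping the optimizer only every accumulation_steps
# micro-batches approximates training with a batch that is accumulation_steps times larger,
# which is why forward_batch_size above is train_batch_size / gradient_accumulation_steps.
def _gradient_accumulation_sketch(model, optimizer, micro_batches, accumulation_steps):
    for step, (inputs, labels) in enumerate(micro_batches):
        loss = model(*inputs, labels=labels)
        (loss / accumulation_steps).backward()
        if (step + 1) % accumulation_steps == 0:
            optimizer.step()
            optimizer.zero_grad()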
| [
"torch.utils.data.ConcatDataset",
"torch.device",
"torch.cuda.manual_seed_all",
"torch.utils.data.RandomSampler",
"torch.distributed.init_process_group",
"torch.utils.data.DistributedSampler",
"torch.utils.data.SequentialSampler",
"torch.no_grad",
"torch.cuda.device_count",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.load",
"torch.nn.DataParallel"
] | 1.1 | johnson7788/TextBrewer | fa7fa4d4a2a8debde5b148d448238f3b4fa1aa9a |
1.1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020/12/23 4:56 下午
# @File : main.trainer_predict_api.py
# @Author: johnson
# @Contact : github: johnson7788
# @Desc :
import logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%Y/%m/%d %H:%M:%S',
level=logging.INFO,
)
logger = logging.getLogger("Main")
import os, random, time
import numpy as np
import torch
from pytorch_pretrained_bert.my_modeling import BertConfig
from pytorch_pretrained_bert import BertTokenizer
from modeling import BertSPCSimple, BertForGLUESimpleAdaptorTraining
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler, DistributedSampler
from tqdm import tqdm
from utils_glue import InputExample, convert_examples_to_features
import argparse
from flask import Flask, request, jsonify, abort
######################################################
# Predict with the non-distilled model, wrapped as a Flask API
######################################################
app = Flask(__name__)
def load_examples(contents, max_seq_length, tokenizer, label_list):
"""
:param contents: eg: [('苹果很好用', '苹果')]
:param max_seq_length:
:param tokenizer: 初始化后的tokenizer
:param label_list:
:return:
"""
examples = []
for guid, content in enumerate(contents):
sentence, aspect = content
examples.append(
InputExample(guid=guid, text_a=sentence, text_b=aspect))
features = convert_examples_to_features(examples, label_list, max_seq_length, tokenizer,
output_mode="classification",
cls_token_segment_id=0, pad_token_segment_id=0)
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids)
return dataset
class TorchAsBertModel(object):
def __init__(self, verbose=0):
self.verbose = verbose
self.label_list = ["NEG", "NEU", "POS"]
self.num_labels = len(self.label_list)
        # Choose the device to use
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.n_gpu = torch.cuda.device_count() if torch.cuda.is_available() else 0
self.tokenizer, self.model = self.load_model()
        # Maximum truncation lengths for the left/right context of the sentence
self.left_max_seq_len = 15
self.right_max_seq_len = 20
self.aspect_max_seq_len = 30
def load_model(self):
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.output_encoded_layers = True
args.output_attention_layers = True
args.output_att_score = True
args.output_att_sum = True
self.args = args
        # Parse the config files; the teacher and the student models share the same vocab
self.vocab_file = "bert_model/vocab.txt"
        # Here we use the teacher's config and the fine-tuned teacher model; they can be swapped for the student config and the distilled student model
# student config: config/chinese_bert_config_L4t.json
# distil student model: distil_model/gs8316.pkl
self.bert_config_file_S = "bert_model/config.json"
self.tuned_checkpoint_S = "trained_teacher_model/gs3024.pkl"
self.max_seq_length = 70
        # Batch size used for prediction
self.predict_batch_size = 64
        # Load the student config; the max sequence length must not exceed the one in the config
bert_config_S = BertConfig.from_json_file(self.bert_config_file_S)
        # Load the tokenizer
tokenizer = BertTokenizer(vocab_file=self.vocab_file)
        # Load the model
model_S = BertSPCSimple(bert_config_S, num_labels=self.num_labels, args=self.args)
state_dict_S = torch.load(self.tuned_checkpoint_S, map_location=self.device)
model_S.load_state_dict(state_dict_S)
if self.verbose:
            print("Model loaded")
return tokenizer, model_S
def truncate(self, input_text, max_len, trun_post='post'):
"""
        Truncate the input text
:param input_text:
:param max_len: eg: 15
        :param trun_post: truncation direction, i.e. whether to keep the front or the back;
        "pre": keep the front part, "post": keep the back part
:return:
"""
if max_len is not None and len(input_text) > max_len:
if trun_post == "post":
return input_text[-max_len:]
else:
return input_text[:max_len]
else:
return input_text
def clean(self, text_left, aspect, text_right):
"""
        Truncate the text segments
:param text_left:
:param aspect:
:param text_right:
:return:
"""
text_left = self.truncate(text_left, self.left_max_seq_len)
aspect = self.truncate(aspect, self.aspect_max_seq_len)
text_right = self.truncate(text_right, self.right_max_seq_len, trun_post="pre")
return text_left, aspect, text_right
def predict_batch(self, data):
"""
        Process the data in batches
        :param data: a list of items to process, [(content, aspect, aspect_start, aspect_end), ...]
:return:
"""
contents = []
for one_data in data:
content, aspect, aspect_start, aspect_end = one_data
text_left = content[:aspect_start]
text_right = content[aspect_end:]
text_left, aspect, text_right = self.clean(text_left, aspect, text_right)
new_content = text_left + aspect + text_right
contents.append((new_content, aspect))
eval_dataset = load_examples(contents, self.max_seq_length, self.tokenizer, self.label_list)
if self.verbose:
            print("Evaluation dataset loaded")
res = self.do_predict(self.model, eval_dataset)
if self.verbose:
            print(f"Prediction results: {res}, {[self.label_list[id] for id in res]}")
        # TODO: for a single input example, it is enough to return a single result
return res
def predict_batch_without_turncate(self, data):
"""
        Process the data in batches
        :param data: a list of items to process, [(content, aspect), ...]
:return:
"""
eval_dataset = load_examples(data, self.max_seq_length, self.tokenizer, self.label_list)
if self.verbose:
            print("Evaluation dataset loaded")
res = self.do_predict(self.model, eval_dataset)
if self.verbose:
            print(f"Prediction results: {res}, {[self.label_list[id] for id in res]}")
        # Convert ids into labels
result = [self.label_list[r] for r in res]
return result
def do_predict(self, model, eval_dataset):
        # Task name
results = []
if self.verbose:
            print("***** Running prediction *****")
            print(" Num examples = %d" % len(eval_dataset))
            print(" Batch size = %d" % self.predict_batch_size)
        # Sampler over the evaluation examples
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=self.predict_batch_size)
model.eval()
model.to(self.device)
        # Start time
start_time = time.time()
        # Store the predicted logits
pred_logits = []
        for batch in tqdm(eval_dataloader, desc="Evaluating", disable=True):
input_ids, input_mask, segment_ids = batch
input_ids = input_ids.to(self.device)
input_mask = input_mask.to(self.device)
segment_ids = segment_ids.to(self.device)
with torch.no_grad():
logits = model(input_ids, input_mask, segment_ids)
cpu_logits = logits.detach().cpu()
for i in range(len(cpu_logits)):
pred_logits.append(cpu_logits[i].numpy())
pred_logits = np.array(pred_logits)
        # Pick the label with the highest probability
preds = np.argmax(pred_logits, axis=1)
if self.verbose:
print(f"preds: {preds}")
results.extend(preds.tolist())
cost_time = time.time() - start_time
if self.verbose:
print(
                f"--- Evaluating {len(eval_dataset)} examples took {cost_time} seconds in total, {cost_time / len(eval_dataset)} seconds per example ---")
return results
@app.route("/api", methods=['POST'])
def api():
"""
Args:
        test_data: the data to predict, a list of text pairs, [(content, aspect), ...]
Returns:
"""
jsonres = request.get_json()
test_data = jsonres.get('data', None)
model = TorchAsBertModel()
results = model.predict_batch_without_turncate(test_data)
return jsonify(results)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5000, debug=True, threaded=True)
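# Illustrative client-side usage (assumes the service runs locally on port 5000; the payload
# follows the handler above: a JSON object with a "data" list of [content, aspect] pairs, and
# the response is a list of NEG/NEU/POS labels):
def example_request():
    import requests
    payload = {"data": [["苹果很好用", "苹果"]]}
    response = requests.post("http://localhost:5000/api", json=payload)
    return response.json()  # e.g. ["POS"]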
| [
"torch.no_grad",
"torch.utils.data.SequentialSampler",
"torch.cuda.device_count",
"torch.cuda.is_available",
"torch.tensor",
"torch.utils.data.DataLoader",
"torch.load",
"torch.utils.data.TensorDataset"
] | 1.1 | johnson7788/TextBrewer | fa7fa4d4a2a8debde5b148d448238f3b4fa1aa9a |
1.0 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) IBM Corporation 2019
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "Tomasz Kornuta & Vincent Marois"
import torch
from ptp.components.models.model import Model
from ptp.data_types.data_definition import DataDefinition
class LeNet5(Model):
"""
A classical LeNet-5 model for MNIST digits classification.
"""
def __init__(self, name, config):
"""
Initializes the ``LeNet5`` model, creates the required layers.
:param name: Name of the model (taken from the configuration file).
:param config: Parameters read from configuration file.
:type config: ``ptp.configuration.ConfigInterface``
"""
super(LeNet5, self).__init__(name, LeNet5, config)
# Get key mappings.
self.key_inputs = self.stream_keys["inputs"]
self.key_predictions = self.stream_keys["predictions"]
# Retrieve prediction size from globals.
self.prediction_size = self.globals["prediction_size"]
# Create the LeNet-5 layers.
self.conv1 = torch.nn.Conv2d(1, 6, kernel_size=(5, 5))
self.maxpool1 = torch.nn.MaxPool2d(kernel_size=(2, 2), stride=2)
self.conv2 = torch.nn.Conv2d(6, 16, kernel_size=(5, 5))
self.maxpool2 = torch.nn.MaxPool2d(kernel_size=(2, 2), stride=2)
self.conv3 = torch.nn.Conv2d(16, 120, kernel_size=(5, 5))
self.linear1 = torch.nn.Linear(120, 84)
self.linear2 = torch.nn.Linear(84, self.prediction_size)
def input_data_definitions(self):
"""
Function returns a dictionary with definitions of input data that are required by the component.
:return: dictionary containing input data definitions (each of type :py:class:`ptp.utils.DataDefinition`).
"""
return {
self.key_inputs: DataDefinition([-1, 1, 32, 32], [torch.Tensor], "Batch of images [BATCH_SIZE x IMAGE_DEPTH x IMAGE_HEIGHT x IMAGE WIDTH]"),
}
def output_data_definitions(self):
"""
Function returns a dictionary with definitions of output data produced the component.
:return: dictionary containing output data definitions (each of type :py:class:`ptp.utils.DataDefinition`).
"""
return {
self.key_predictions: DataDefinition([-1, self.prediction_size], [torch.Tensor], "Batch of predictions, each represented as probability distribution over classes [BATCH_SIZE x PREDICTION_SIZE]")
}
def forward(self, data_streams):
"""
Main forward pass of the ``LeNet5`` model.
:param data_streams: DataStreams({'images',**}), where:
- images: [batch_size, num_channels, width, height]
:type data_streams: ``miprometheus.utils.DataStreams``
:return: Predictions [batch_size, num_classes]
"""
# Add noise to weights
#for _, param in self.named_parameters():
# if param.requires_grad:
# #print (name, param.data)
# #noise = -torch.randn(param.data.shape)*0.3
# noise = 0.3
# param.data = param.data * (1 + noise)
# #print (name, param.data)
# Unpack DataStreams.
img = data_streams[self.key_inputs]
# Pass inputs through layers.
x = self.conv1(img)
x = torch.nn.functional.relu(x)
x = self.maxpool1(x)
x = self.conv2(x)
x = torch.nn.functional.relu(x)
x = self.maxpool2(x)
x = self.conv3(x)
x = torch.nn.functional.relu(x)
x = x.view(-1, 120)
x = self.linear1(x)
x = torch.nn.functional.relu(x)
x = self.linear2(x)
# Log softmax.
predictions = torch.nn.functional.log_softmax(x, dim=1)
# Add predictions to datadict.
data_streams.publish({self.key_predictions: predictions})
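# Illustrative shape walk-through (not part of the component): for a 1x32x32 input, the layer
# arithmetic of the forward pass above is 32 -> 28 (conv1 5x5) -> 14 (maxpool) -> 10 (conv2 5x5)
# -> 5 (maxpool) -> 1 (conv3 5x5), which is why the view() above flattens to 120 features.
def _lenet5_shape_sketch():
    import torch
    x = torch.zeros(1, 1, 32, 32)
    x = torch.nn.functional.max_pool2d(torch.nn.Conv2d(1, 6, 5)(x), 2)   # [1, 6, 14, 14]
    x = torch.nn.functional.max_pool2d(torch.nn.Conv2d(6, 16, 5)(x), 2)  # [1, 16, 5, 5]
    x = torch.nn.Conv2d(16, 120, 5)(x)                                   # [1, 120, 1, 1]
    return x.view(-1, 120).shape                                         # [1, 120]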
| [
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.functional.log_softmax",
"torch.nn.Conv2d",
"torch.nn.functional.relu"
] | 1.0.1 | aasseman/pytorchpipe | 9cb17271666061cb19fe24197ecd5e4c8d32c5da |
1.0 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) IBM Corporation 2019
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "Vincent Marois, Tomasz Kornuta"
from os import path,makedirs
import yaml
import torch
from time import sleep
from datetime import datetime
import ptp.configuration.config_parsing as config_parse
import ptp.utils.logger as logging
from ptp.workers.worker import Worker
from ptp.application.task_manager import TaskManager
from ptp.application.pipeline_manager import PipelineManager
from ptp.utils.statistics_collector import StatisticsCollector
from ptp.utils.statistics_aggregator import StatisticsAggregator
class Trainer(Worker):
"""
Base class for the trainers.
Iterates over epochs on the dataset.
All other types of trainers (e.g. ``OnlineTrainer`` & ``OfflineTrainer``) should subclass it.
"""
def __init__(self, name, class_type):
"""
Base constructor for all trainers:
- Adds default trainer command line arguments
:param name: Name of the worker
:type name: str
:param class_type: Class type of the component.
"""
# Call base constructor to set up app state, registry and add default arguments.
super(Trainer, self).__init__(name, class_type)
# Add arguments to the specific parser.
# These arguments will be shared by all basic trainers.
self.parser.add_argument(
'--tensorboard',
action='store',
dest='tensorboard', choices=[0, 1, 2],
type=int,
help="If present, enable logging to TensorBoard. Available log levels:\n"
"0: Log the collected statistics.\n"
"1: Add the histograms of the model's biases & weights (Warning: Slow).\n"
"2: Add the histograms of the model's biases & weights gradients "
"(Warning: Even slower).")
self.parser.add_argument(
'--saveall',
dest='save_intermediate',
action='store_true',
help='Setting to true results in saving intermediate models during training (DEFAULT: False)')
self.parser.add_argument(
'--training',
dest='training_section_name',
type=str,
default="training",
help='Name of the section defining the training procedure (DEFAULT: training)')
self.parser.add_argument(
'--validation',
dest='validation_section_name',
type=str,
default="validation",
help='Name of the section defining the validation procedure (DEFAULT: validation)')
def setup_experiment(self):
"""
Sets up experiment of all trainers:
- Calls base class setup_experiment to parse the command line arguments,
- Loads the config file(s)
- Set up the log directory path
- Add a ``FileHandler`` to the logger
- Set random seeds
- Creates the pipeline consisting of many components
- Creates training task manager
- Handles curriculum learning if indicated
- Creates validation task manager
- Set optimizer
- Performs testing of compatibility of both training and validation tasks and created pipeline.
"""
# Call base method to parse all command line arguments and add default sections.
super(Trainer, self).setup_experiment()
# "Pass" configuration parameters from the "default_training" section to training section indicated by the section_name.
self.config.add_default_params({ self.app_state.args.training_section_name : self.config['default_training'].to_dict()} )
self.config.del_default_params('default_training')
# "Pass" configuration parameters from the "default_validation" section to validation section indicated by the section_name.
self.config.add_default_params({ self.app_state.args.validation_section_name: self.config['default_validation'].to_dict()} )
self.config.del_default_params('default_validation')
# Check the presence of the CUDA-compatible devices.
if self.app_state.args.use_gpu and (torch.cuda.device_count() == 0):
self.logger.error("Cannot use GPU as there are no CUDA-compatible devices present in the system!")
exit(-1)
# Check if config file was selected.
if self.app_state.args.config == '':
print('Please pass configuration file(s) as --c parameter')
exit(-2)
# Split and make them absolute.
root_configs = self.app_state.args.config.replace(" ", "").split(',')
# If there are - expand them to absolute paths.
abs_root_configs = [path.expanduser(config) for config in root_configs]
# Get the list of configurations which need to be loaded.
configs_to_load = config_parse.recurrent_config_parse(abs_root_configs, [], self.app_state.absolute_config_path)
# Read the YAML files one by one - but in reverse order -> overwrite the first indicated config(s)
config_parse.reverse_order_config_load(self.config, configs_to_load)
# -> At this point, the Param Registry contains the configuration loaded (and overwritten) from several files.
# Log the resulting training configuration.
conf_str = 'Loaded (initial) configuration:\n'
conf_str += '='*80 + '\n'
conf_str += yaml.safe_dump(self.config.to_dict(), default_flow_style=False)
conf_str += '='*80 + '\n'
print(conf_str)
# Get training section.
try:
tsn = self.app_state.args.training_section_name
self.config_training = self.config[tsn]
            # We must additionally check if it is None - weird behaviour when using default value.
if self.config_training is None:
raise KeyError()
except KeyError:
print("Error: Couldn't retrieve the training section '{}' from the loaded configuration".format(tsn))
exit(-1)
# Get training task type.
try:
training_task_type = self.config_training['task']['type']
except KeyError:
print("Error: Couldn't retrieve the task 'type' from the training section '{}' in the loaded configuration".format(tsn))
exit(-1)
# Get validation section.
try:
vsn = self.app_state.args.validation_section_name
self.config_validation = self.config[vsn]
if self.config_validation is None:
raise KeyError()
except KeyError:
print("Error: Couldn't retrieve the validation section '{}' from the loaded configuration".format(vsn))
exit(-1)
# Get validation task type.
try:
_ = self.config_validation['task']['type']
except KeyError:
print("Error: Couldn't retrieve the task 'type' from the validation section '{}' in the loaded configuration".format(vsn))
exit(-1)
# Get pipeline section.
try:
psn = self.app_state.args.pipeline_section_name
self.config_pipeline = self.config[psn]
if self.config_pipeline is None:
raise KeyError()
except KeyError:
print("Error: Couldn't retrieve the pipeline section '{}' from the loaded configuration".format(psn))
exit(-1)
# Get pipeline name.
try:
pipeline_name = self.config_pipeline['name']
except KeyError:
# Using name of the first configuration file from command line.
basename = path.basename(root_configs[0])
# Take config filename without extension.
pipeline_name = path.splitext(basename)[0]
# Set pipeline name, so processor can use it afterwards.
self.config_pipeline.add_config_params({'name': pipeline_name})
# Prepare the output path for logging
while True: # Dirty fix: if log_dir already exists, wait for 1 second and try again
try:
time_str = '{0:%Y%m%d_%H%M%S}'.format(datetime.now())
if self.app_state.args.exptag != '':
time_str = time_str + "_" + self.app_state.args.exptag
self.app_state.log_dir = path.expanduser(self.app_state.args.expdir) + '/' + training_task_type + '/' + pipeline_name + '/' + time_str + '/'
# Lowercase dir.
self.app_state.log_dir = self.app_state.log_dir.lower()
makedirs(self.app_state.log_dir, exist_ok=False)
except FileExistsError:
sleep(1)
else:
break
# Set log dir.
self.app_state.log_file = self.app_state.log_dir + 'trainer.log'
# Initialize logger in app state.
self.app_state.logger = logging.initialize_logger("AppState")
# Add handlers for the logfile to worker logger.
logging.add_file_handler_to_logger(self.logger)
self.logger.info("Logger directory set to: {}".format(self.app_state.log_dir))
# Set cpu/gpu types.
self.app_state.set_types()
# Models dir.
self.checkpoint_dir = self.app_state.log_dir + 'checkpoints/'
makedirs(self.checkpoint_dir, exist_ok=False)
# Set random seeds in the training section.
self.set_random_seeds('training', self.config_training)
# Total number of detected errors.
        errors = 0
################# TRAINING PROBLEM #################
# Build training task manager.
self.training = TaskManager('training', self.config_training)
errors += self.training.build()
# parse the curriculum learning section in the loaded configuration.
if 'curriculum_learning' in self.config_training:
# Initialize curriculum learning - with values from loaded configuration.
self.training.task.curriculum_learning_initialize(self.config_training['curriculum_learning'])
# If the 'must_finish' key is not present in config then then it will be finished by default
self.config_training['curriculum_learning'].add_default_params({'must_finish': True})
self.must_finish_curriculum = self.config_training['curriculum_learning']['must_finish']
self.logger.info("Curriculum Learning activated")
else:
# If not using curriculum learning then it does not have to be finished.
self.must_finish_curriculum = False
self.curric_done = True
################# VALIDATION PROBLEM #################
# Build validation task manager.
self.validation = TaskManager('validation', self.config_validation)
errors += self.validation.build()
###################### PIPELINE ######################
# Build the pipeline using the loaded configuration.
self.pipeline = PipelineManager(pipeline_name, self.config_pipeline)
errors += self.pipeline.build()
# Check errors.
if errors > 0:
self.logger.error('Found {} errors, terminating execution'.format(errors))
exit(-2)
# Show pipeline.
summary_str = self.pipeline.summarize_all_components_header()
summary_str += self.training.task.summarize_io("training")
summary_str += self.validation.task.summarize_io("validation")
summary_str += self.pipeline.summarize_all_components()
self.logger.info(summary_str)
# Handshake definitions.
self.logger.info("Handshaking training pipeline")
defs_training = self.training.task.output_data_definitions()
errors += self.pipeline.handshake(defs_training)
self.logger.info("Handshaking validation pipeline")
defs_valid = self.validation.task.output_data_definitions()
errors += self.pipeline.handshake(defs_valid)
# Check errors.
if errors > 0:
self.logger.error('Found {} errors, terminating execution'.format(errors))
exit(-2)
################## MODEL LOAD/FREEZE #################
# Load the pretrained models params from checkpoint.
try:
# Check command line arguments, then check load option in config.
if self.app_state.args.load_checkpoint != "":
pipeline_name = self.app_state.args.load_checkpoint
msg = "command line (--load)"
elif "load" in self.config_pipeline:
pipeline_name = self.config_pipeline['load']
msg = "'pipeline' section of the configuration file"
else:
pipeline_name = ""
# Try to load the model.
if pipeline_name != "":
if path.isfile(pipeline_name):
# Load parameters from checkpoint.
self.pipeline.load(pipeline_name)
else:
raise Exception("Couldn't load the checkpoint {} indicated in the {}: file does not exist".format(pipeline_name, msg))
# If we succeeded, we do not want to load the models from the file anymore!
else:
# Try to load the models parameters - one by one, if set so in the configuration file.
self.pipeline.load_models()
except KeyError:
self.logger.error("File {} indicated in the {} seems not to be a valid model checkpoint".format(pipeline_name, msg))
exit(-5)
except Exception as e:
self.logger.error(e)
# Exit by following the logic: if user wanted to load the model but failed, then continuing the experiment makes no sense.
exit(-6)
# Finally, freeze the models (that the user wants to freeze).
self.pipeline.freeze_models()
# Log the model summaries.
summary_str = self.pipeline.summarize_models_header()
summary_str += self.pipeline.summarize_models()
self.logger.info(summary_str)
# Move the models in the pipeline to GPU.
if self.app_state.args.use_gpu:
self.pipeline.cuda()
################# OPTIMIZER #################
# Set the optimizer.
optimizer_conf = dict(self.config_training['optimizer'])
optimizer_type = optimizer_conf['type']
del optimizer_conf['type']
# Check if there are any models in the pipeline.
if len(list(filter(lambda p: p.requires_grad, self.pipeline.parameters()))) == 0:
self.logger.error('Cannot proceed with training, as there are no trainable models in the pipeline (or all models are frozen)')
exit(-7)
# Instantiate the optimizer and filter the model parameters based on if they require gradients.
self.optimizer = getattr(torch.optim, optimizer_type)(
filter(lambda p: p.requires_grad, self.pipeline.parameters()), **optimizer_conf)
log_str = 'Optimizer:\n' + '='*80 + "\n"
log_str += " Type: " + optimizer_type + "\n"
log_str += " Params: {}".format(optimizer_conf)
self.logger.info(log_str)
def add_statistics(self, stat_col):
"""
Calls base method and adds epoch statistics to ``StatisticsCollector``.
:param stat_col: ``StatisticsCollector``.
"""
# Add loss and episode.
super(Trainer, self).add_statistics(stat_col)
# Add default statistics with formatting.
stat_col.add_statistics('epoch', '{:02d}')
def add_aggregators(self, stat_agg):
"""
        Adds basic aggregators to ``StatisticsAggregator`` and extends them with: epoch.
:param stat_agg: ``StatisticsAggregator``.
"""
# Add basic aggregators.
super(Trainer, self).add_aggregators(stat_agg)
        # Add aggregator for the epoch.
stat_agg.add_aggregator('epoch', '{:02d}')
def initialize_statistics_collection(self):
"""
- Initializes all ``StatisticsCollectors`` and ``StatisticsAggregators`` used by a given worker: \
- For training statistics (adds the statistics of the model & task),
- For validation statistics (adds the statistics of the model & task).
- Creates the output files (csv).
"""
# TRAINING.
# Create statistics collector for training.
self.training_stat_col = StatisticsCollector()
self.add_statistics(self.training_stat_col)
self.training.task.add_statistics(self.training_stat_col)
self.pipeline.add_statistics(self.training_stat_col)
# Create the csv file to store the training statistics.
self.training_batch_stats_file = self.training_stat_col.initialize_csv_file(self.app_state.log_dir, 'training_statistics.csv')
# Create statistics aggregator for training.
self.training_stat_agg = StatisticsAggregator()
self.add_aggregators(self.training_stat_agg)
self.training.task.add_aggregators(self.training_stat_agg)
self.pipeline.add_aggregators(self.training_stat_agg)
# Create the csv file to store the training statistic aggregations.
self.training_set_stats_file = self.training_stat_agg.initialize_csv_file(self.app_state.log_dir, 'training_set_agg_statistics.csv')
# VALIDATION.
# Create statistics collector for validation.
self.validation_stat_col = StatisticsCollector()
self.add_statistics(self.validation_stat_col)
self.validation.task.add_statistics(self.validation_stat_col)
self.pipeline.add_statistics(self.validation_stat_col)
# Create the csv file to store the validation statistics.
self.validation_batch_stats_file = self.validation_stat_col.initialize_csv_file(self.app_state.log_dir, 'validation_statistics.csv')
# Create statistics aggregator for validation.
self.validation_stat_agg = StatisticsAggregator()
self.add_aggregators(self.validation_stat_agg)
self.validation.task.add_aggregators(self.validation_stat_agg)
self.pipeline.add_aggregators(self.validation_stat_agg)
# Create the csv file to store the validation statistic aggregations.
self.validation_set_stats_file = self.validation_stat_agg.initialize_csv_file(self.app_state.log_dir, 'validation_set_agg_statistics.csv')
def finalize_statistics_collection(self):
"""
Finalizes the statistics collection by closing the csv files.
"""
# Close all files.
self.training_batch_stats_file.close()
self.training_set_stats_file.close()
self.validation_batch_stats_file.close()
self.validation_set_stats_file.close()
def initialize_tensorboard(self):
"""
Initializes the TensorBoard writers, and log directories.
"""
# Create TensorBoard outputs - if TensorBoard is supposed to be used.
if self.app_state.args.tensorboard is not None:
from tensorboardX import SummaryWriter
self.training_batch_writer = SummaryWriter(self.app_state.log_dir + '/training')
self.training_stat_col.initialize_tensorboard(self.training_batch_writer)
self.training_set_writer = SummaryWriter(self.app_state.log_dir + '/training_set_agg')
self.training_stat_agg.initialize_tensorboard(self.training_set_writer)
self.validation_batch_writer = SummaryWriter(self.app_state.log_dir + '/validation')
self.validation_stat_col.initialize_tensorboard(self.validation_batch_writer)
self.validation_set_writer = SummaryWriter(self.app_state.log_dir + '/validation_set_agg')
self.validation_stat_agg.initialize_tensorboard(self.validation_set_writer)
else:
self.training_batch_writer = None
self.training_set_writer = None
self.validation_batch_writer = None
self.validation_set_writer = None
def finalize_tensorboard(self):
"""
Finalizes the operation of TensorBoard writers by closing them.
"""
# Close the TensorBoard writers.
if self.training_batch_writer is not None:
self.training_batch_writer.close()
if self.training_set_writer is not None:
self.training_set_writer.close()
if self.validation_batch_writer is not None:
self.validation_batch_writer.close()
if self.validation_set_writer is not None:
self.validation_set_writer.close()
def validate_on_batch(self, valid_batch):
"""
Performs a validation of the model using the provided batch.
Additionally logs results (to files, TensorBoard) and handles visualization.
:param valid_batch: data batch generated by the task and used as input to the model.
:type valid_batch: ``DataStreams``
:return: Validation loss.
"""
# Turn on evaluation mode.
self.pipeline.eval()
# Empty the statistics collector.
self.validation_stat_col.empty()
# Compute the validation loss using the provided data batch.
with torch.no_grad():
# Forward pass.
self.pipeline.forward(valid_batch)
# Collect the statistics.
self.collect_all_statistics(self.validation, self.pipeline, valid_batch, self.validation_stat_col)
# Export collected statistics.
self.export_all_statistics(self.validation_stat_col, '[Partial Validation]')
def validate_on_set(self):
"""
Performs a validation of the model on the whole validation set, using the validation ``DataLoader``.
Iterates over the entire validation set (through the `DataLoader``), aggregates the collected statistics \
and logs that to the console, csv and TensorBoard (if set).
"""
# Get number of samples.
num_samples = len(self.validation)
self.logger.info('Validating over the entire validation set ({} samples in {} episodes)'.format(
num_samples, len(self.validation.dataloader)))
# Turn on evaluation mode.
self.pipeline.eval()
# Reset the statistics.
self.validation_stat_col.empty()
# Remember global episode number.
old_episode = self.app_state.episode
with torch.no_grad():
for ep, valid_batch in enumerate(self.validation.dataloader):
self.app_state.episode = ep
# Forward pass.
self.pipeline.forward(valid_batch)
# Collect the statistics.
self.collect_all_statistics(self.validation, self.pipeline, valid_batch,
self.validation_stat_col)
# Revert to global episode number.
self.app_state.episode = old_episode
# Aggregate statistics for the whole set.
self.aggregate_all_statistics(self.validation, self.pipeline,
self.validation_stat_col, self.validation_stat_agg)
# Export aggregated statistics.
self.export_all_statistics(self.validation_stat_agg, '[Full Validation]')
if __name__ == '__main__':
print("The trainer.py file contains only an abstract base class. Please try to use the \
online_trainer (mip-online-trainer) or offline_trainer (mip-offline-trainer) instead.")
| [
"torch.no_grad",
"torch.cuda.device_count"
] | 1.0.1 | aasseman/pytorchpipe | 9cb17271666061cb19fe24197ecd5e4c8d32c5da |
1.4 | # Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import time
import sys
import torch
import numpy as np
from parameterized import parameterized
from monai.transforms import CopyItemsd
from monai.utils import ensure_tuple
TEST_CASE_1 = ["img", 1, "img_1"]
TEST_CASE_2 = [["img", "seg"], 1, ["img_1", "seg_1"]]
TEST_CASE_3 = ["img", 2, ["img_1", "img_2"]]
TEST_CASE_4 = [["img", "seg"], 2, ["img_1", "seg_1", "img_2", "seg_2"]]
class TestCopyItemsd(unittest.TestCase):
@parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4])
def test_numpy_values(self, keys, times, names):
input_data = {"img": np.array([[0, 1], [1, 2]]), "seg": np.array([[0, 1], [1, 2]])}
result = CopyItemsd(keys=keys, times=times, names=names)(input_data)
for name in ensure_tuple(names):
self.assertTrue(name in result)
result[name] += 1
np.testing.assert_allclose(result[name], np.array([[1, 2], [2, 3]]))
np.testing.assert_allclose(result["img"], np.array([[0, 1], [1, 2]]))
def test_tensor_values(self):
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu:0")
input_data = {
"img": torch.tensor([[0, 1], [1, 2]], device=device),
"seg": torch.tensor([[0, 1], [1, 2]], device=device),
}
result = CopyItemsd(keys="img", times=1, names="img_1")(input_data)
self.assertTrue("img_1" in result)
result["img_1"] += 1
torch.testing.assert_allclose(result["img"], torch.tensor([[0, 1], [1, 2]], device=device))
torch.testing.assert_allclose(result["img_1"], torch.tensor([[1, 2], [2, 3]], device=device))
def test_array_values(self):
input_data = {"img": [[0, 1], [1, 2]], "seg": [[0, 1], [1, 2]]}
result = CopyItemsd(keys="img", times=1, names="img_1")(input_data)
self.assertTrue("img_1" in result)
result["img_1"][0][0] += 1
np.testing.assert_allclose(result["img"], [[0, 1], [1, 2]])
np.testing.assert_allclose(result["img_1"], [[1, 1], [1, 2]])
if __name__ == "__main__":
unittest.main()
| [
"torch.device",
"torch.cuda.is_available",
"torch.tensor"
] | 1.4 | Scitator/MONAI | a42b563acf0c7504cee18ee84c8af2eff6e948a7 |
1.4 | import glob
import json
import os
import random
from torch.utils import data
from torch.nn import CrossEntropyLoss
from torch.utils.data import Subset
from datasets.episode import Episode
from datasets.wsd_dataset import WordWSDDataset, MetaWSDDataset
from datasets.ner_dataset import NERSampler, read_examples_from_file, get_labels
from transformers import BertTokenizer
bert_tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
def write_json(json_dict, file_name):
with open(file_name, 'w', encoding='utf8') as f:
json.dump(json_dict, f, indent=4)
def read_json(file_name):
with open(file_name, 'r', encoding='utf8') as f:
json_dict = json.load(f)
return json_dict
def get_max_batch_len(batch):
return max([len(x[0]) for x in batch])
def prepare_batch(batch):
max_len = get_max_batch_len(batch)
x = []
lengths = []
y = []
for inp_seq, target_seq in batch:
lengths.append(len(inp_seq))
target_seq = target_seq + [-1] * (max_len - len(target_seq))
x.append(inp_seq)
y.append(target_seq)
# print (lengths[0], x[0], y[0])
return x, lengths, y
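# Example (toy values): prepare_batch pads only the label sequences with -1;
# the input sequences are left at their original lengths.
#   batch = [([1, 2, 3], [0, 1, 0]), ([4, 5], [1, 1])]
#   x, lengths, y = prepare_batch(batch)
#   # -> x == [[1, 2, 3], [4, 5]], lengths == [3, 2], y == [[0, 1, 0], [1, 1, -1]]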
def prepare_bert_batch(batch):
x = []
lengths = []
y = []
for sentences, labels in batch:
tokens = []
label_ids = []
length = 0
for word, label in zip(sentences, labels):
word_tokens = bert_tokenizer.tokenize(word)
tokens.extend(word_tokens)
label_ids.extend([label] + [-1] * (len(word_tokens) - 1))
length += len(word_tokens)
# check
# if all([lab == -1 for lab in label_ids]):
# print (labels)
        assert not all(lab == -1 for lab in label_ids)
x.append(tokens)
lengths.append(length)
y.append(label_ids)
max_len = max(lengths)
for i in range(len(y)):
y[i] = y[i] + [-1] * (max_len - len(y[i]))
# print (x[-1])
# print (batch[-1][1])
# print (y[-1])
return x, lengths, y
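# Example (assumed subword splits -- the exact pieces depend on the BERT vocab):
# for words ["John", "visited", "Antwerp"] with labels [1, 0, 3], if "Antwerp"
# tokenizes to ["Ant", "##wer", "##p"], the aligned output is
#   tokens    = ["John", "visited", "Ant", "##wer", "##p"]
#   label_ids = [1, 0, 3, -1, -1]
# i.e. only the first subtoken of each word keeps its label; the -1 positions are
# ignored by the loss, and shorter sequences are padded with extra -1 entries.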
def prepare_task_batch(batch):
return batch
def generate_semcor_wsd_episodes(wsd_dataset, n_episodes, n_support_examples, n_query_examples, task):
word_splits = {k: v for (k, v) in wsd_dataset.word_splits.items() if len(v['sentences']) >
(n_support_examples + n_query_examples)}
if n_episodes > len(word_splits):
raise Exception('Not enough data available to generate {} episodes'.format(n_episodes))
episodes = []
for word in word_splits.keys():
if len(episodes) == n_episodes:
break
indices = list(range(len(word_splits[word]['sentences'])))
random.shuffle(indices)
start_index = 0
train_subset = WordWSDDataset(sentences=[word_splits[word]['sentences'][i] for i in indices[start_index: start_index + n_support_examples]],
labels=[word_splits[word]['labels'][i] for i in indices[start_index: start_index + n_support_examples]],
n_classes=len(wsd_dataset.sense_inventory[word]))
support_loader = data.DataLoader(train_subset, batch_size=n_support_examples, collate_fn=prepare_batch)
start_index += n_support_examples
test_subset = WordWSDDataset(sentences=[word_splits[word]['sentences'][i] for i in indices[start_index: start_index + n_query_examples]],
labels=[word_splits[word]['labels'][i] for i in indices[start_index: start_index + n_query_examples]],
n_classes=len(wsd_dataset.sense_inventory[word]))
query_loader = data.DataLoader(test_subset, batch_size=n_query_examples, collate_fn=prepare_batch)
episode = Episode(support_loader=support_loader,
query_loader=query_loader,
base_task=task,
task_id=task + '-' + word,
n_classes=train_subset.n_classes)
episodes.append(episode)
return episodes
def generate_wsd_episodes(dir, n_episodes, n_support_examples, n_query_examples, task, meta_train=True):
episodes = []
for file_name in glob.glob(os.path.join(dir, '*.json')):
if len(episodes) == n_episodes:
break
word = file_name.split(os.sep)[-1].split('.')[0]
word_wsd_dataset = MetaWSDDataset(file_name)
train_subset = Subset(word_wsd_dataset, range(0, n_support_examples))
support_loader = data.DataLoader(train_subset, batch_size=n_support_examples, collate_fn=prepare_batch)
if meta_train:
test_subset = Subset(word_wsd_dataset, range(n_support_examples, n_support_examples + n_query_examples))
else:
test_subset = Subset(word_wsd_dataset, range(n_support_examples, len(word_wsd_dataset)))
query_loader = data.DataLoader(test_subset, batch_size=n_query_examples, collate_fn=prepare_batch)
episode = Episode(support_loader=support_loader,
query_loader=query_loader,
base_task=task,
task_id=task + '-' + word,
n_classes=word_wsd_dataset.n_classes)
episodes.append(episode)
return episodes
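# Usage sketch (the directory and counts are placeholders): each returned Episode
# pairs one support loader with one query loader for a single target word.
#   episodes = generate_wsd_episodes('data/meta_train', n_episodes=100,
#                                    n_support_examples=4, n_query_examples=8,
#                                    task='wsd')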
def generate_ner_episodes(dir, labels_file, n_episodes, n_support_examples, n_query_examples, task,
meta_train=True, vectors='bert'):
episodes = []
labels = get_labels(labels_file)
examples, label_map = read_examples_from_file(dir, labels)
# print ('label_map', label_map)
ner_dataset = NERSampler(examples, labels, label_map, 6, n_support_examples, n_query_examples, n_episodes)
for index, ner_data in enumerate(ner_dataset):
tags, sup_sents, query_sents = ner_data
# print (len(tags), len(sup_sents.labels), len(query_sents.labels))
if vectors == 'bert':
support_loader = data.DataLoader(sup_sents, batch_size=6*n_support_examples,
collate_fn=lambda pb: prepare_bert_batch(pb))
query_loader = data.DataLoader(query_sents, batch_size=6*n_query_examples,
collate_fn=lambda pb: prepare_bert_batch(pb))
else:
support_loader = data.DataLoader(sup_sents, batch_size=6*n_support_examples,
collate_fn=prepare_batch)
query_loader = data.DataLoader(query_sents, batch_size=6*n_query_examples,
collate_fn=prepare_batch)
episode = Episode(support_loader=support_loader,
query_loader=query_loader,
base_task=task,
task_id=task + '-' + str(index),
n_classes=len(labels))
episodes.append(episode)
return episodes | [
"torch.utils.data.DataLoader"
] | 1.4.0 | muralinba12/MetaLearningForNER | 61b5159059e486b8e0b50fcd8089554bc26249f6 |
1.1 | import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
import torch.nn.functional as F
class PointNetEncoder(nn.Module):
def __init__(self, in_channel=64):
super(PointNetEncoder, self).__init__()
self.conv1 = torch.nn.Conv1d(in_channel, 128, 1)
self.conv2 = torch.nn.Conv1d(128, 1024, 1)
self.bn1 = nn.BatchNorm1d(128)
self.bn2 = nn.BatchNorm1d(1024)
def forward(self, x):
B, D, N = x.size()
x = F.relu(self.bn1(self.conv1(x)))
pointfeat = x
x = F.relu(self.bn2(self.conv2(x)))
x = torch.max(x, 2, keepdim=True)[0]
x = x.view(-1, 1024)
x = x.view(-1, 1024, 1).repeat(1, 1, N)
return torch.cat([x, pointfeat], 1)
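# Shape walkthrough (assuming the default in_channel=64 and an input of (B, 64, N)):
#   conv1+bn1  -> (B, 128, N)   kept as the per-point features ("pointfeat")
#   conv2+bn2  -> (B, 1024, N)
#   max over N -> (B, 1024)     global feature, broadcast back to (B, 1024, N)
#   cat        -> (B, 1152, N)  which matches the 1152 input channels of
#                               PointNetBackbone.conv1 below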
class PointNetBackbone(nn.Module):
def __init__(self, in_channel=64):
super(PointNetBackbone, self).__init__()
self.feat = PointNetEncoder(in_channel=in_channel)
self.conv1 = torch.nn.Conv1d(1152, 512, 1)
self.conv2 = torch.nn.Conv1d(512, 256, 1)
self.conv3 = torch.nn.Conv1d(256, 128, 1)
self.bn1 = nn.BatchNorm1d(512)
self.bn2 = nn.BatchNorm1d(256)
self.bn3 = nn.BatchNorm1d(128)
def forward(self, x):
batchsize = x.size()[0]
n_pts = x.size()[2]
x = self.feat(x)
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
return x
class VotingModule(nn.Module):
def __init__(self, in_channel=64, feature_channel=128, num_voting=1):
super(VotingModule, self).__init__()
self.pointnet = PointNetBackbone(in_channel)
self.conv1 = nn.Conv1d(feature_channel, feature_channel, 1, bias=False)
self.conv2 = nn.Conv1d(feature_channel, feature_channel, 1, bias=False)
self.offset = nn.Conv1d(feature_channel, 2, 1, bias=False)
self.stride = nn.Conv1d(feature_channel, 1, 1, bias=False)
self.prob = nn.Conv1d(feature_channel, 1, 1, bias=False)
self.sigmoid = nn.Sigmoid()
self.bn1 = nn.BatchNorm1d(feature_channel)
self.bn2 = nn.BatchNorm1d(feature_channel)
def forward(self, input_feature):
voting_feature = self.pointnet(input_feature)
voting_feature = F.relu(self.bn1(self.conv1(voting_feature)))
voting_feature = F.relu(self.bn2(self.conv2(voting_feature)))
centering_offset = self.offset(voting_feature)
stride = F.relu(self.stride(voting_feature))
prob = self.sigmoid(self.prob(voting_feature))
return centering_offset, stride, prob
if __name__ == '__main__':
model = VotingModule()
xyz = torch.rand(12, 64, 6000)
    # VotingModule.forward expects the feature tensor directly, not a dict.
    output = model(xyz)
| [
"torch.rand",
"torch.cat",
"torch.nn.Sigmoid",
"torch.nn.Conv1d",
"torch.max",
"torch.nn.BatchNorm1d"
] | 1.1 | wyddmw/RotPred | 18ca1a565fdbf90e8016e51ed5a3b84dc12109f3 |
1.7 | # Copyright (c) 2021. TsumiNa. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from collections import OrderedDict
from pathlib import Path
from scipy.special import softmax
import numpy as np
import pandas as pd
import pytest
from shutil import rmtree
import torch
import os
from xenonpy.model import SequentialLinear
from xenonpy.model.training import Trainer
from xenonpy.model.training.base import BaseExtension, BaseRunner
from xenonpy.model.training.extension import TensorConverter, Validator, Persist
from xenonpy.model.utils import regression_metrics, classification_metrics
@pytest.fixture(scope='module')
def data():
# ignore numpy warning
import warnings
print('ignore NumPy RuntimeWarning\n')
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ndarray size changed")
dir_ = os.path.dirname(os.path.abspath(__file__))
yield
try:
rmtree(str(Path('.').resolve() / 'test_model'))
except:
pass
try:
rmtree(str(Path('.').resolve() / 'test_model@1'))
except:
pass
try:
rmtree(str(Path('.').resolve() / 'test_model_1'))
except:
pass
try:
rmtree(str(Path('.').resolve() / 'test_model_2'))
except:
pass
try:
rmtree(str(Path('.').resolve() / 'test_model_3'))
except:
pass
try:
rmtree(str(Path('.').resolve() / Path(os.getcwd()).name))
except:
pass
print('test over')
def test_base_runner_1():
ext = BaseExtension()
x, y = 1, 2
assert ext.input_proc(x, y) == (x, y)
assert ext.output_proc(y, None) == (y, None)
x, y = (1,), 2
assert ext.input_proc(x, y) == (x, y)
assert ext.output_proc(y, None) == (y, None)
x, y = (1,), (2,)
assert ext.input_proc(x, y) == (x, y)
assert ext.output_proc(y, y) == (y, y)
def test_tensor_converter_1():
class _Trainer(BaseRunner):
def __init__(self):
super().__init__()
self.non_blocking = False
def predict(self, x_, y_): # noqa
return x_, y_
trainer = _Trainer()
arr_1 = [1, 2, 3]
np_1 = np.asarray(arr_1)
se_1 = pd.Series(arr_1)
pd_1 = pd.DataFrame(arr_1)
np_ = np.asarray([arr_1, arr_1])
pd_ = pd.DataFrame(np_)
tensor_ = torch.Tensor(np_)
# test auto reshape; #189
converter = TensorConverter(auto_reshape=False)
x, y = converter.input_proc(np_1, None, trainer=trainer) # noqa
assert isinstance(x, torch.Tensor)
assert x.shape == (3,)
x, y = converter.input_proc(se_1, None, trainer=trainer) # noqa
assert isinstance(x, torch.Tensor)
assert x.shape == (3,)
x, y = converter.input_proc(pd_1, None, trainer=trainer) # noqa
assert isinstance(x, torch.Tensor)
assert x.shape == (3, 1)
converter = TensorConverter()
x, y = converter.input_proc(np_1, None, trainer=trainer) # noqa
assert isinstance(x, torch.Tensor)
assert x.shape == (3, 1)
x, y = converter.input_proc(se_1, None, trainer=trainer) # noqa
assert isinstance(x, torch.Tensor)
assert x.shape == (3, 1)
x, y = converter.input_proc(pd_1, None, trainer=trainer) # noqa
assert isinstance(x, torch.Tensor)
assert x.shape == (3, 1)
# normal tests
x, y = converter.input_proc(np_, None, trainer=trainer) # noqa
assert isinstance(x, torch.Tensor)
assert x.shape == (2, 3)
assert torch.equal(x, tensor_)
assert y is None
x, y = converter.input_proc(pd_, None, trainer=trainer) # noqa
assert isinstance(x, torch.Tensor)
assert x.shape == (2, 3)
assert torch.equal(x, tensor_)
assert y is None
x, y = converter.input_proc(tensor_, None, trainer=trainer) # noqa
assert isinstance(x, torch.Tensor)
assert x.shape == (2, 3)
assert torch.equal(x, tensor_)
assert y is None
x, y = converter.input_proc(np_, np_, trainer=trainer) # noqa
assert isinstance(x, torch.Tensor)
assert x.shape == (2, 3)
assert torch.equal(x, tensor_)
assert torch.equal(y, tensor_)
x, y = converter.input_proc(pd_, pd_, trainer=trainer) # noqa
assert isinstance(x, torch.Tensor)
assert x.shape == (2, 3)
assert torch.equal(x, tensor_)
assert torch.equal(y, tensor_)
x, y = converter.input_proc(tensor_, tensor_, trainer=trainer) # noqa
assert isinstance(x, torch.Tensor)
assert x.shape == (2, 3)
assert torch.equal(x, tensor_)
assert torch.equal(y, tensor_)
converter = TensorConverter(x_dtype=torch.long)
x, y = converter.input_proc((np_, np_), np_, trainer=trainer) # noqa
assert isinstance(x, tuple)
assert len(x) == 2
assert x[0].dtype == torch.long
assert x[1].dtype == torch.long
converter = TensorConverter(x_dtype=(torch.long, torch.float32), y_dtype=torch.long)
x, y = converter.input_proc((np_, np_), np_, trainer=trainer) # noqa
assert isinstance(x, tuple)
assert len(x) == 2
assert x[0].dtype == torch.long
assert x[1].dtype == torch.float32
assert y.dtype == torch.long
converter = TensorConverter(x_dtype=(torch.long, torch.float32))
x, y = converter.input_proc((pd_, pd_), pd_, trainer=trainer) # noqa
assert isinstance(x, tuple)
assert len(x) == 2
assert x[0].dtype == torch.long
assert x[1].dtype == torch.float32
# for tensor input, dtype change will never be executed
converter = TensorConverter(x_dtype=(torch.long, torch.long))
x, y = converter.input_proc((tensor_, tensor_), tensor_, trainer=trainer) # noqa
assert isinstance(x, tuple)
assert len(x) == 2
assert x[0].dtype == torch.float32
assert x[1].dtype == torch.float32
def test_tensor_converter_2():
class _Trainer(BaseRunner):
def __init__(self):
super().__init__()
self.non_blocking = False
def predict(self, x_, y_):
return x_, y_
trainer = _Trainer()
converter = TensorConverter()
np_ = np.asarray([[1, 2, 3], [4, 5, 6]])
pd_ = pd.DataFrame(np_)
tensor_ = torch.Tensor(np_) # noqa
x, y = converter.input_proc(np_, np_[0], trainer=trainer) # noqa
assert isinstance(y, torch.Tensor)
assert y.shape == (3, 1)
assert torch.equal(y, tensor_[0].unsqueeze(-1))
x, y = converter.input_proc(pd_, pd_.iloc[0], trainer=trainer) # noqa
assert isinstance(y, torch.Tensor)
assert y.shape == (3, 1)
assert torch.equal(y, tensor_[0].unsqueeze(-1))
x, y = converter.input_proc(tensor_, tensor_[0], trainer=trainer) # noqa
assert isinstance(y, torch.Tensor)
assert y.shape == (3,)
assert torch.equal(y, tensor_[0])
def test_tensor_converter_3():
np_ = np.asarray([[1, 2, 3], [4, 5, 6]])
tensor_ = torch.from_numpy(np_)
converter = TensorConverter()
y, y_ = converter.output_proc(tensor_, None, training=True)
assert y_ is None
assert isinstance(y, torch.Tensor)
assert y.shape == (2, 3)
assert torch.equal(y, tensor_)
y, y_ = converter.output_proc(tensor_, tensor_, training=True)
assert isinstance(y, torch.Tensor)
assert isinstance(y_, torch.Tensor)
assert y.equal(y_)
assert y.shape == (2, 3)
assert torch.equal(y, tensor_)
y, _ = converter.output_proc((tensor_,), None, training=True)
assert isinstance(y, tuple)
assert isinstance(y[0], torch.Tensor)
assert torch.equal(y[0], tensor_)
y, y_ = converter.output_proc(tensor_, tensor_, training=False)
assert isinstance(y, np.ndarray)
assert isinstance(y_, np.ndarray)
assert np.all(y == y_)
assert y.shape == (2, 3)
assert np.all(y == tensor_.numpy())
y, _ = converter.output_proc((tensor_,), None, training=False)
assert isinstance(y, tuple)
assert isinstance(y[0], np.ndarray)
assert np.all(y[0] == tensor_.numpy())
converter = TensorConverter(argmax=True)
y, y_ = converter.output_proc(tensor_, tensor_, training=False)
assert isinstance(y, np.ndarray)
assert isinstance(y_, np.ndarray)
assert y.shape == (2,)
assert y_.shape == (2, 3)
assert np.all(y == np.argmax(np_, 1))
y, y_ = converter.output_proc((tensor_, tensor_), None, training=False)
assert isinstance(y, tuple)
assert y_ is None
assert y[0].shape == (2,)
assert y[0].shape == y[1].shape
assert np.all(y[0] == np.argmax(np_, 1))
converter = TensorConverter(probability=True)
y, y_ = converter.output_proc(tensor_, tensor_, training=False)
assert isinstance(y, np.ndarray)
assert isinstance(y_, np.ndarray)
assert y.shape == (2, 3)
assert y_.shape == (2, 3)
assert np.all(y == softmax(np_, 1))
y, y_ = converter.output_proc((tensor_, tensor_), None, training=False)
assert isinstance(y, tuple)
assert y_ is None
assert y[0].shape == (2, 3)
assert y[0].shape == y[1].shape
assert np.all(y[0] == softmax(np_, 1))
def test_validator_1():
x = np.random.randn(100) # input
y = x + np.random.rand() * 0.001 # true values
class _Trainer(BaseRunner):
def __init__(self):
super().__init__()
self.x_val = x
self.y_val = y
self.loss_type = 'train_loss'
def predict(self, x_, y_):
return x_, y_
val = Validator('regress', each_iteration=False)
step_info = OrderedDict(train_loss=0, i_epoch=0)
val.step_forward(trainer=_Trainer(), step_info=step_info) # noqa
assert 'val_mae' not in step_info
step_info = OrderedDict(train_loss=0, i_epoch=1)
val.step_forward(trainer=_Trainer(), step_info=step_info) # noqa
assert step_info['val_mae'] == regression_metrics(y, x)['mae']
assert set(step_info.keys()) == {
'i_epoch', 'val_mae', 'val_mse', 'val_rmse', 'val_r2', 'val_pearsonr', 'val_spearmanr',
'val_p_value', 'val_max_ae', 'train_loss'
}
def test_validator_2():
y = np.random.randint(3, size=10) # true labels
x = np.zeros((10, 3)) # input
for i, j in enumerate(y):
x[i, j] = 1
class _Trainer(BaseRunner):
def __init__(self):
super().__init__()
self.x_val = x
self.y_val = y
self.loss_type = 'train_loss'
def predict(self, x_, y_): # noqa
return x_, y_
val = Validator('classify', each_iteration=False)
step_info = OrderedDict(train_loss=0, i_epoch=0)
val.step_forward(trainer=_Trainer(), step_info=step_info) # noqa
assert 'val_f1' not in step_info
step_info = OrderedDict(train_loss=0, i_epoch=1)
val.step_forward(trainer=_Trainer(), step_info=step_info) # noqa
assert step_info['val_f1'] == classification_metrics(y, x)['f1']
assert set(step_info.keys()) == {
'i_epoch', 'val_accuracy', 'val_f1', 'val_precision', 'val_recall', 'val_macro_f1',
'val_macro_precision', 'val_macro_recall', 'train_loss'
}
def test_persist_1(data):
class _Trainer(BaseRunner):
def __init__(self):
super().__init__()
self.model = SequentialLinear(50, 2)
def predict(self, x_, y_): # noqa
return x_, y_
p = Persist()
with pytest.raises(ValueError, match='can not access property `path` before training'):
p.path
p.before_proc(trainer=_Trainer())
assert p.path == str(Path('.').resolve() / Path(os.getcwd()).name)
with pytest.raises(ValueError, match='can not reset property `path` after training'):
p.path = 'aa'
p = Persist('test_model')
p.before_proc(trainer=_Trainer())
assert p.path == str(Path('.').resolve() / 'test_model')
assert (Path('.').resolve() / 'test_model' / 'describe.pkl.z').exists()
assert (Path('.').resolve() / 'test_model' / 'init_state.pth.s').exists()
assert (Path('.').resolve() / 'test_model' / 'model.pth.m').exists()
assert (Path('.').resolve() / 'test_model' / 'model_structure.pkl.z').exists()
p = Persist('test_model', increment=True)
p.before_proc(trainer=_Trainer())
assert p.path == str(Path('.').resolve() / 'test_model@1')
assert (Path('.').resolve() / 'test_model@1' / 'describe.pkl.z').exists()
assert (Path('.').resolve() / 'test_model@1' / 'init_state.pth.s').exists()
assert (Path('.').resolve() / 'test_model@1' / 'model.pth.m').exists()
assert (Path('.').resolve() / 'test_model@1' / 'model_structure.pkl.z').exists()
def test_persist_save_checkpoints(data):
class _Trainer(BaseRunner):
def __init__(self):
super().__init__()
self.model = SequentialLinear(50, 2)
def predict(self, x_, y_): # noqa
return x_, y_
cp_1 = Trainer.checkpoint_tuple(
id='cp_1',
iterations=111,
model_state=SequentialLinear(50, 2).state_dict(),
)
cp_2 = Trainer.checkpoint_tuple(
id='cp_2',
iterations=111,
model_state=SequentialLinear(50, 2).state_dict(),
)
# save checkpoint
p = Persist('test_model_1', increment=False, only_best_states=False)
p.before_proc(trainer=_Trainer())
p.on_checkpoint(cp_1, trainer=_Trainer())
p.on_checkpoint(cp_2, trainer=_Trainer())
assert (Path('.').resolve() / 'test_model_1' / 'checkpoints' / 'cp_1.pth.s').exists()
assert (Path('.').resolve() / 'test_model_1' / 'checkpoints' / 'cp_2.pth.s').exists()
# reduced save checkpoint
p = Persist('test_model_2', increment=False, only_best_states=True)
p.before_proc(trainer=_Trainer())
p.on_checkpoint(cp_1, trainer=_Trainer())
p.on_checkpoint(cp_2, trainer=_Trainer())
assert (Path('.').resolve() / 'test_model_2' / 'checkpoints' / 'cp.pth.s').exists()
assert not (Path('.').resolve() / 'test_model_2' / 'checkpoints' / 'cp_1.pth.s').exists()
assert not (Path('.').resolve() / 'test_model_2' / 'checkpoints' / 'cp_2.pth.s').exists()
# no checkpoint will be saved
p = Persist('test_model_3', increment=False, only_best_states=True)
p.before_proc(trainer=_Trainer())
p.on_checkpoint(cp_2, trainer=_Trainer())
assert not (Path('.').resolve() / 'test_model_3' / 'checkpoints' / 'cp.pth.s').exists()
assert not (Path('.').resolve() / 'test_model_3' / 'checkpoints' / 'cp_2.pth.s').exists()
if __name__ == "__main__":
pytest.main()
| [
"torch.from_numpy",
"torch.equal",
"torch.Tensor"
] | 1.7.0 | mori0711/XenonPy | e36ca0ea112b45ee629cd980c88e80cd6c96c514 |
1.1 | import json
import pprint
import random
import time
import torch
import torch.multiprocessing as mp
from models.nn.resnet import Resnet
from data.preprocess import Dataset
from importlib import import_module
class Eval(object):
# tokens
STOP_TOKEN = "<<stop>>"
SEQ_TOKEN = "<<seg>>"
TERMINAL_TOKENS = [STOP_TOKEN, SEQ_TOKEN]
def __init__(self, args, manager):
# args and manager
self.args = args
self.manager = manager
# load splits
with open(self.args.splits) as f:
self.splits = json.load(f)
pprint.pprint({k: len(v) for k, v in self.splits.items()})
# load model
print("Loading: ", self.args.model_path)
M = import_module(self.args.model)
self.model, optimizer = M.Module.load(self.args.model_path)
self.model.share_memory()
self.model.eval()
# updated args
self.model.args.dout = self.args.model_path.replace(self.args.model_path.split('/')[-1], '')
self.model.args.data = self.args.data if self.args.data else self.model.args.data
# preprocess and save
if args.preprocess:
print("\nPreprocessing dataset and saving to %s folders ... This is will take a while. Do this once as required:" % self.model.args.pp_folder)
self.model.args.fast_epoch = self.args.fast_epoch
dataset = Dataset(self.model.args, self.model.vocab)
dataset.preprocess_splits(self.splits)
# load resnet
args.visual_model = 'resnet18'
self.resnet = Resnet(args, eval=True, share_memory=True, use_conv_feat=True)
# gpu
if self.args.gpu:
self.model = self.model.to(torch.device('cuda'))
# success and failure lists
self.create_stats()
# set random seed for shuffling
random.seed(int(time.time()))
def queue_tasks(self):
'''
create queue of trajectories to be evaluated
'''
task_queue = self.manager.Queue()
files = self.splits[self.args.eval_split]
# debugging: fast epoch
if self.args.fast_epoch:
files = files[:16]
if self.args.shuffle:
random.shuffle(files)
for traj in files:
task_queue.put(traj)
return task_queue
def spawn_threads(self):
'''
spawn multiple threads to run eval in parallel
'''
task_queue = self.queue_tasks()
# start threads
threads = []
lock = self.manager.Lock()
for n in range(self.args.num_threads):
thread = mp.Process(target=self.run, args=(self.model, self.resnet, task_queue, self.args, lock,
self.successes, self.failures, self.results))
thread.start()
threads.append(thread)
for t in threads:
t.join()
# save
self.save_results()
@classmethod
def setup_scene(cls, env, traj_data, r_idx, args, reward_type='dense'):
'''
        initialize the scene and agent from the task info
'''
# scene setup
scene_num = traj_data['scene']['scene_num']
object_poses = traj_data['scene']['object_poses']
dirty_and_empty = traj_data['scene']['dirty_and_empty']
object_toggles = traj_data['scene']['object_toggles']
scene_name = 'FloorPlan%d' % scene_num
env.reset(scene_name)
env.restore_scene(object_poses, object_toggles, dirty_and_empty)
# initialize to start position
env.step(dict(traj_data['scene']['init_action']))
# print goal instr
print("Task: %s" % (traj_data['turk_annotations']['anns'][r_idx]['task_desc']))
# setup task for reward
env.set_task(traj_data, args, reward_type=reward_type)
@classmethod
def run(cls, model, resnet, task_queue, args, lock, successes, failures):
raise NotImplementedError()
@classmethod
def evaluate(cls, env, model, r_idx, resnet, traj_data, args, lock, successes, failures):
raise NotImplementedError()
def save_results(self):
raise NotImplementedError()
def create_stats(self):
raise NotImplementedError() | [
"torch.multiprocessing.Process",
"torch.device"
] | 1.1.0 | caisarl76/alfred | b73bdc1651e14c02440938b639fa3c7f3ab3d321 |
1.8 | import torch
import torch.nn as nn
import torch.distributed as dist
from torch.utils.tensorboard import SummaryWriter
import torch.multiprocessing as mp
import torch.utils.data.distributed
import argparse
import os
import json
from models.StyleSpeech import StyleSpeech
from models.Discriminators import Discriminator
from dataloader import prepare_dataloader
from optimizer import ScheduledOptim
from evaluate import evaluate
import utils
def load_checkpoint(checkpoint_path, model, discriminator, G_optim, D_optim, rank, distributed=False):
assert os.path.isfile(checkpoint_path)
print("Starting model from checkpoint '{}'".format(checkpoint_path))
checkpoint_dict = torch.load(checkpoint_path, map_location='cuda:{}'.format(rank))
if 'model' in checkpoint_dict:
if distributed:
state_dict = {}
for k,v in checkpoint_dict['model'].items():
state_dict['module.{}'.format(k)] = v
model.load_state_dict(state_dict)
else:
model.load_state_dict(checkpoint_dict['model'])
print('Model is loaded!')
if 'discriminator' in checkpoint_dict:
if distributed:
state_dict = {}
for k,v in checkpoint_dict['discriminator'].items():
state_dict['module.{}'.format(k)] = v
discriminator.load_state_dict(state_dict)
else:
discriminator.load_state_dict(checkpoint_dict['discriminator'])
print('Discriminator is loaded!')
if 'G_optim' in checkpoint_dict or 'optimizer' in checkpoint_dict:
if 'optimizer' in checkpoint_dict:
G_optim.load_state_dict(checkpoint_dict['optimizer'])
if 'G_optim' in checkpoint_dict:
G_optim.load_state_dict(checkpoint_dict['G_optim'])
print('G_optim is loaded!')
if 'D_optim' in checkpoint_dict:
D_optim.load_state_dict(checkpoint_dict['D_optim'])
print('D_optim is loaded!')
current_step = checkpoint_dict['step'] + 1
del checkpoint_dict
return model, discriminator, G_optim, D_optim, current_step
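# Usage sketch (the checkpoint path is a placeholder): the model, discriminator
# and both optimizers must already be constructed; the returned step is where
# training resumes.
#   model, discriminator, G_optim, D_optim, step = load_checkpoint(
#       'exp_meta_stylespeech/ckpt/checkpoint_100000.pth.tar',
#       model, discriminator, G_optim, D_optim, rank=0, distributed=False)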
def main(rank, args, c):
print('Use GPU: {} for training'.format(rank))
ngpus = args.ngpus
if args.distributed:
torch.cuda.set_device(rank % ngpus)
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=rank)
# Define model & loss
model = StyleSpeech(c).cuda()
discriminator = Discriminator(c).cuda()
num_param = utils.get_param_num(model)
D_num_param = utils.get_param_num(discriminator)
if rank==0:
print('Number of Meta-StyleSpeech Parameters:', num_param)
print('Number of Discriminator Parameters:', D_num_param)
with open(os.path.join(args.save_path, "model.txt"), "w") as f_log:
f_log.write(str(model))
f_log.write(str(discriminator))
print("Model Has Been Defined")
model_without_ddp = model
discriminator_without_ddp = discriminator
if args.distributed:
c.meta_batch_size = c.meta_batch_size // ngpus
model = nn.parallel.DistributedDataParallel(model, device_ids=[rank])
model_without_ddp = model.module
discriminator = nn.parallel.DistributedDataParallel(discriminator, device_ids=[rank])
discriminator_without_ddp = discriminator.module
# Optimizer
G_optim = torch.optim.Adam(model.parameters(), betas=c.betas, eps=c.eps)
D_optim = torch.optim.Adam(discriminator.parameters(), lr=2e-4, betas=c.betas, eps=c.eps)
# Loss
Loss = model_without_ddp.get_criterion()
adversarial_loss = discriminator_without_ddp.get_criterion()
print("Optimizer and Loss Function Defined.")
# Get dataset
data_loader = prepare_dataloader(args.data_path, "train.txt", batch_size=c.meta_batch_size, meta_learning=True, seed=rank)
print("Data Loader is Prepared")
# Load checkpoint if exists
if args.checkpoint_path is not None:
assert os.path.exists(args.checkpoint_path)
model, discriminator, G_optim, D_optim, current_step = load_checkpoint(
args.checkpoint_path, model, discriminator, G_optim, D_optim, rank, args.distributed)
print("\n---Model Restored at Step {}---\n".format(current_step))
else:
print("\n---Start New Training---\n")
current_step = 0
if rank == 0:
checkpoint_path = os.path.join(args.save_path, 'ckpt')
os.makedirs(checkpoint_path, exist_ok=True)
# scheduled optimizer
G_optim = ScheduledOptim(G_optim, c.decoder_hidden, c.n_warm_up_step, current_step)
# Init logger
if rank == 0:
log_path = os.path.join(args.save_path, 'log')
logger = SummaryWriter(os.path.join(log_path, 'board'))
with open(os.path.join(log_path, "log.txt"), "a") as f_log:
f_log.write("Dataset :{}\n Number of Parameters: {}\n".format(c.dataset, num_param))
# Init synthesis directory
if rank == 0:
synth_path = os.path.join(args.save_path, 'synth')
os.makedirs(synth_path, exist_ok=True)
model.train()
while current_step < args.max_iter:
# Get Training Loader
for idx, batch in enumerate(data_loader):
if current_step == args.max_iter:
break
losses = {}
#### Generator ####
G_optim.zero_grad()
# Get Support Data
sid, text, mel_target, D, log_D, f0, energy, \
src_len, mel_len, max_src_len, max_mel_len = model_without_ddp.parse_batch(batch)
# Support Forward
mel_output, src_output, style_vector, log_duration_output, f0_output, energy_output, src_mask, mel_mask, _ = model(
text, src_len, mel_target, mel_len, D, f0, energy, max_src_len, max_mel_len)
src_target, _, _ = model_without_ddp.variance_adaptor.length_regulator(src_output, D)
# Reconstruction loss
mel_loss, d_loss, f_loss, e_loss = Loss(mel_output, mel_target,
log_duration_output, log_D, f0_output, f0, energy_output, energy, src_len, mel_len)
losses['G_recon'] = mel_loss
losses['d_loss'] = d_loss
losses['f_loss'] = f_loss
losses['e_loss'] = e_loss
#### META LEARNING ####
# Get query text
B = mel_target.shape[0]
perm_idx = torch.randperm(B)
q_text, q_src_len = text[perm_idx], src_len[perm_idx]
# Generate query speech
q_mel_output, q_src_output, q_log_duration_output, \
_, _, q_src_mask, q_mel_mask, q_mel_len = model_without_ddp.inference(style_vector, q_text, q_src_len)
            # Regulate length of query src
q_duration_rounded = torch.clamp(torch.round(torch.exp(q_log_duration_output.detach())-1.), min=0)
q_duration = q_duration_rounded.masked_fill(q_src_mask, 0).long()
q_src, _, _ = model_without_ddp.variance_adaptor.length_regulator(q_src_output, q_duration)
            # Adversarial loss
t_val, s_val, _= discriminator(q_mel_output, q_src, None, sid, q_mel_mask)
losses['G_GAN_query_t'] = adversarial_loss(t_val, is_real=True)
losses['G_GAN_query_s'] = adversarial_loss(s_val, is_real=True)
# Total generator loss
alpha = 10.0
G_loss = alpha*losses['G_recon'] + losses['d_loss'] + losses['f_loss'] + losses['e_loss'] + \
losses['G_GAN_query_t'] + losses['G_GAN_query_s']
# Backward loss
G_loss.backward()
# Update weights
G_optim.step_and_update_lr()
#### Discriminator ####
D_optim.zero_grad()
# Real
real_t_pred, real_s_pred, cls_loss = discriminator(
mel_target, src_target.detach(), style_vector.detach(), sid, mask=mel_mask)
# Fake
fake_t_pred, fake_s_pred, _ = discriminator(
q_mel_output.detach(), q_src.detach(), None, sid, mask=q_mel_mask)
losses['D_t_loss'] = adversarial_loss(real_t_pred, is_real=True) + adversarial_loss(fake_t_pred, is_real=False)
losses['D_s_loss'] = adversarial_loss(real_s_pred, is_real=True) + adversarial_loss(fake_s_pred, is_real=False)
losses['cls_loss'] = cls_loss
# Total discriminator Loss
D_loss = losses['D_t_loss'] + losses['D_s_loss'] + losses['cls_loss']
# Backward
D_loss.backward()
# Update weights
D_optim.step()
# Print log
if current_step % args.log_step == 0 and current_step != 0 and rank == 0 :
m_l = losses['G_recon'].item()
d_l = losses['d_loss'].item()
f_l = losses['f_loss'].item()
e_l = losses['e_loss'].item()
g_t_l = losses['G_GAN_query_t'].item()
g_s_l = losses['G_GAN_query_s'].item()
d_t_l = losses['D_t_loss'].item() / 2
d_s_l = losses['D_s_loss'].item() / 2
cls_l = losses['cls_loss'].item()
str1 = "Step [{}/{}]:".format(current_step, args.max_iter)
str2 = "Mel Loss: {:.4f},\n" \
"Duration Loss: {:.4f}, F0 Loss: {:.4f}, Energy Loss: {:.4f}\n" \
"T G Loss: {:.4f}, T D Loss: {:.4f}, S G Loss: {:.4f}, S D Loss: {:.4f} \n" \
"cls_Loss: {:.4f};" \
.format(m_l, d_l, f_l, e_l, g_t_l, d_t_l, g_s_l, d_s_l, cls_l)
print(str1 + "\n" + str2 +"\n")
with open(os.path.join(log_path, "log.txt"), "a") as f_log:
f_log.write(str1 + "\n" + str2 +"\n")
logger.add_scalar('Train/mel_loss', m_l, current_step)
logger.add_scalar('Train/duration_loss', d_l, current_step)
logger.add_scalar('Train/f0_loss', f_l, current_step)
logger.add_scalar('Train/energy_loss', e_l, current_step)
logger.add_scalar('Train/G_t_loss', g_t_l, current_step)
logger.add_scalar('Train/D_t_loss', d_t_l, current_step)
logger.add_scalar('Train/G_s_loss', g_s_l, current_step)
logger.add_scalar('Train/D_s_loss', d_s_l, current_step)
logger.add_scalar('Train/cls_loss', cls_l, current_step)
# Save Checkpoint
if current_step % args.save_step == 0 and current_step != 0 and rank == 0:
torch.save({'model': model_without_ddp.state_dict(),
'discriminator': discriminator_without_ddp.state_dict(),
'G_optim': G_optim.state_dict(),'D_optim': D_optim.state_dict(),
'step': current_step},
os.path.join(checkpoint_path, 'checkpoint_{}.pth.tar'.format(current_step)))
print("*** Save Checkpoint ***")
print("Save model at step {}...\n".format(current_step))
if current_step % args.synth_step == 0 and current_step != 0 and rank == 0:
length = mel_len[0].item()
mel_target = mel_target[0, :length].detach().cpu().transpose(0, 1)
mel = mel_output[0, :length].detach().cpu().transpose(0, 1)
q_length = q_mel_len[0].item()
q_mel = q_mel_output[0, :q_length].detach().cpu().transpose(0, 1)
# plotting
utils.plot_data([q_mel.numpy(), mel.numpy(), mel_target.numpy()],
['Query Spectrogram', 'Recon Spectrogram', 'Ground-Truth Spectrogram'], filename=os.path.join(synth_path, 'step_{}.png'.format(current_step)))
print("Synth audios at step {}...\n".format(current_step))
# Evaluate
if current_step % args.eval_step == 0 and current_step != 0 and rank == 0:
model.eval()
with torch.no_grad():
m_l, d_l, f_l, e_l = evaluate(args, model_without_ddp, current_step)
str_v = "*** Validation ***\n" \
"Meta-StyleSpeech Step {},\n" \
"Mel Loss: {}\nDuration Loss:{}\nF0 Loss: {}\nEnergy Loss: {}" \
.format(current_step, m_l, d_l, f_l, e_l)
print(str_v + "\n" )
with open(os.path.join(log_path, "eval.txt"), "a") as f_log:
f_log.write(str_v + "\n")
logger.add_scalar('Validation/mel_loss', m_l, current_step)
logger.add_scalar('Validation/duration_loss', d_l, current_step)
logger.add_scalar('Validation/f0_loss', f_l, current_step)
logger.add_scalar('Validation/energy_loss', e_l, current_step)
model.train()
current_step += 1
if rank == 0:
print("Training Done at Step : {}".format(current_step))
torch.save({'model': model_without_ddp.state_dict(),
'discriminator': discriminator_without_ddp.state_dict(),
'G_optim': G_optim.state_dict(), 'D_optim': D_optim.state_dict(),
'step': current_step},
os.path.join(checkpoint_path, 'checkpoint_last_{}.pth.tar'.format(current_step)))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', default='dataset/LibriTTS/preprocessed')
parser.add_argument('--save_path', default='exp_meta_stylespeech')
parser.add_argument('--config', default='configs/config.json')
parser.add_argument('--max_iter', default=100000, type=int)
parser.add_argument('--save_step', default=5000, type=int)
parser.add_argument('--synth_step', default=1000, type=int)
parser.add_argument('--eval_step', default=5000, type=int)
parser.add_argument('--log_step', default=100, type=int)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to pretrained model')
parser.add_argument('--dist-url', default='tcp://127.0.0.1:3456', type=str, help='url for setting up distributed training')
parser.add_argument('--world-size', default=-1, type=int, help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int, help='distributed backend')
parser.add_argument('--dist-backend', default='nccl', type=str, help='node rank for distributed training')
args = parser.parse_args()
torch.backends.cudnn.enabled = True
with open(args.config) as f:
data = f.read()
json_config = json.loads(data)
config = utils.AttrDict(json_config)
utils.build_env(args.config, 'config.json', args.save_path)
ngpus = torch.cuda.device_count()
args.ngpus = ngpus
args.distributed = ngpus > 1
if args.distributed:
args.world_size = ngpus
mp.spawn(main, nprocs=ngpus, args=(args, config))
else:
main(0, args, config)
| [
"torch.distributed.init_process_group",
"torch.no_grad",
"torch.multiprocessing.spawn",
"torch.nn.parallel.DistributedDataParallel",
"torch.randperm",
"torch.cuda.device_count",
"torch.cuda.set_device"
] | 1.8.1 | ishine/StyleSpeech-1 | f939cf9cb981db7b738fa9c9c9a7fea2dfdd0766 |
1.0 | """
Checker functions
"""
import numpy as np
import torch
PI = 3.1415
DIM = 64.0
SCALE = 255.0
FIXED_CIRCLE = False
class CentroidFunction(torch.nn.Module):
def __init__(self, bs, ch, sx, sy):
super(CentroidFunction, self).__init__()
self.x_lin = torch.nn.Parameter(torch.linspace(0, sx, sx).expand(bs, ch, sx, sy)).requires_grad_(False).cuda()
self.y_lin = torch.nn.Parameter(torch.linspace(0, sy, sy).expand(bs, ch, sy, sx).transpose(2, 3)
).requires_grad_(False).cuda()
def forward(self, img_batch):
img_batch = img_batch[:, 0:-1, ...] # Dropping the very last channel.
m00_t = img_batch.sum(dim=(2, 3))
m01_t = torch.mul(img_batch, self.x_lin)
m10_t = torch.mul(img_batch, self.y_lin)
cx_t = torch.sum(m01_t, dim=(2, 3)) / (m00_t + 0.01)
cy_t = torch.sum(m10_t, dim=(2, 3)) / (m00_t + 0.01)
return cx_t, cy_t
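# Worked example: the forward pass evaluates the raw image moments
#   cx = m01 / m00 = sum(I * x) / sum(I),  cy = m10 / m00 = sum(I * y) / sum(I),
# so for a channel that is zero everywhere except a small blob of ones centred
# near column 10 and row 20, the recovered (cx, cy) is close to (10, 20); the
# +0.01 in the denominator only protects empty channels from division by zero.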
def p1_fn(x, torch=True):
#print(x.size())
if torch:
if FIXED_CIRCLE:
return x.mean(dim=(1,2,3)).unsqueeze(1)
else:
#return x.mean(dim=(2,3))
return x[:, 0:-1, ...].mean(dim=(2,3)) # Dropping the very last channel.
else:
return x.mean(axis=(1,2,3))
def p2_fn(x, torch=True):
pass
| [
"torch.mul",
"torch.linspace",
"torch.sum"
] | 1.0.0 | chomd90/invnet | 0d359e57b66f2e738812b5d660563fb4b3ab8f4a |
1.7 | import torch
from torch import nn
import torch.nn.functional as F
class ContentLoss(nn.Module):
"""
Content Loss for the neural style transfer algorithm.
"""
def __init__(self, target: torch.Tensor, device: torch.device) -> None:
super(ContentLoss, self).__init__()
batch_size, channels, height, width = target.size()
target = target.view(batch_size * channels, height * width)
self.target = target.detach().to(device)
def __str__(self) -> str:
return "Content loss"
def forward(self, input: torch.Tensor) -> torch.Tensor:
batch_size, channels, height, width = input.size()
input = input.view(batch_size * channels, height * width)
return F.mse_loss(input, self.target)
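# Usage sketch (placeholder tensors with a plausible VGG feature-map shape):
#   target_feat = torch.randn(1, 128, 64, 64)
#   content_loss = ContentLoss(target_feat, device=torch.device('cpu'))
#   loss = content_loss(torch.randn(1, 128, 64, 64))  # scalar MSE over flattened maps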
class StyleLoss(nn.Module):
"""
Style loss for the neural style transfer algorithm.
"""
def __init__(self, target: torch.Tensor, device: torch.device) -> None:
super(StyleLoss, self).__init__()
self.target = self.compute_gram_matrix(target).detach().to(device)
def __str__(self) -> str:
return "Style loss"
def forward(self, input: torch.Tensor) -> torch.Tensor:
input = self.compute_gram_matrix(input)
return F.mse_loss(input, self.target)
def compute_gram_matrix(self, input: torch.Tensor) -> torch.Tensor:
batch_size, channels, height, width = input.size()
input = input.view(batch_size * channels, height * width)
return torch.matmul(input, input.T).div(batch_size * channels * height * width) | [
"torch.nn.functional.mse_loss",
"torch.matmul"
] | 1.7.0 | visualCalculus/neural-style-transfer | 96f98a642dc9bf7b1ae59729b3712ff467afa38d |
1.0 | """
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import re
import importlib
import torch
from argparse import Namespace
import numpy as np
from PIL import Image
import os
import argparse
import dill as pickle
import util.coco
def save_obj(obj, name):
with open(name, 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name):
with open(name, 'rb') as f:
return pickle.load(f)
# returns a configuration for creating a generator
# |default_opt| should be the opt of the current experiment
# |**kwargs|: if any configuration should be overriden, it can be specified here
def copyconf(default_opt, **kwargs):
conf = argparse.Namespace(**vars(default_opt))
for key in kwargs:
print(key, kwargs[key])
setattr(conf, key, kwargs[key])
return conf
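# Usage sketch (the opt namespace and option names are placeholders): copyconf
# clones the namespace and overrides only the given keys.
#   test_opt = copyconf(opt, batchSize=1, no_flip=True)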
def tile_images(imgs, picturesPerRow=4):
""" Code borrowed from
https://stackoverflow.com/questions/26521365/cleanly-tile-numpy-array-of-images-stored-in-a-flattened-1d-format/26521997
"""
# Padding
if imgs.shape[0] % picturesPerRow == 0:
rowPadding = 0
else:
rowPadding = picturesPerRow - imgs.shape[0] % picturesPerRow
if rowPadding > 0:
imgs = np.concatenate([imgs, np.zeros((rowPadding, *imgs.shape[1:]), dtype=imgs.dtype)], axis=0)
# Tiling Loop (The conditionals are not necessary anymore)
tiled = []
for i in range(0, imgs.shape[0], picturesPerRow):
tiled.append(np.concatenate([imgs[j] for j in range(i, i + picturesPerRow)], axis=1))
tiled = np.concatenate(tiled, axis=0)
return tiled
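# Example: with the default picturesPerRow=4, ten images of shape (H, W, 3) are
# padded with two blank (zero) images and tiled into a 3x4 grid of shape (3*H, 4*W, 3).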
# Converts a Tensor into a Numpy array
# |imtype|: the desired type of the converted numpy array
def tensor2im(image_tensor, imtype=np.uint8, normalize=True, tile=False):
if isinstance(image_tensor, list):
image_numpy = []
for i in range(len(image_tensor)):
image_numpy.append(tensor2im(image_tensor[i], imtype, normalize))
return image_numpy
if image_tensor.dim() == 4:
# transform each image in the batch
images_np = []
for b in range(image_tensor.size(0)):
one_image = image_tensor[b]
one_image_np = tensor2im(one_image)
images_np.append(one_image_np.reshape(1, *one_image_np.shape))
images_np = np.concatenate(images_np, axis=0)
if tile:
images_tiled = tile_images(images_np)
return images_tiled
else:
return images_np
if image_tensor.dim() == 2:
image_tensor = image_tensor.unsqueeze(0)
image_numpy = image_tensor.detach().cpu().float().numpy()
if normalize:
image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
else:
image_numpy = np.transpose(image_numpy, (1, 2, 0)) * 255.0
image_numpy = np.clip(image_numpy, 0, 255)
if image_numpy.shape[2] == 1:
image_numpy = image_numpy[:, :, 0]
return image_numpy.astype(imtype)
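# Usage sketch (placeholder tensor): a generator output in [-1, 1] is mapped to
# uint8 via (x + 1) / 2 * 255 and returned channels-last.
#   fake = torch.tanh(torch.randn(1, 3, 256, 256))
#   img = tensor2im(fake)  # numpy array of shape (1, 256, 256, 3), dtype uint8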
# Converts a one-hot tensor into a colorful label map
def tensor2label(label_tensor, n_label, imtype=np.uint8, tile=False):
if label_tensor.dim() == 4:
# transform each image in the batch
images_np = []
for b in range(label_tensor.size(0)):
one_image = label_tensor[b]
one_image_np = tensor2label(one_image, n_label, imtype)
images_np.append(one_image_np.reshape(1, *one_image_np.shape))
images_np = np.concatenate(images_np, axis=0)
if tile:
images_tiled = tile_images(images_np)
return images_tiled
else:
images_np = images_np[0]
return images_np
if label_tensor.dim() == 1:
return np.zeros((64, 64, 3), dtype=np.uint8)
if n_label == 0:
return tensor2im(label_tensor, imtype)
label_tensor = label_tensor.cpu().float()
if label_tensor.size()[0] > 1:
label_tensor = label_tensor.max(0, keepdim=True)[1]
label_tensor = Colorize(n_label)(label_tensor)
label_numpy = np.transpose(label_tensor.numpy(), (1, 2, 0))
result = label_numpy.astype(imtype)
return result
def save_image(image_numpy, image_path, create_dir=False):
if create_dir:
os.makedirs(os.path.dirname(image_path), exist_ok=True)
if len(image_numpy.shape) == 2:
image_numpy = np.expand_dims(image_numpy, axis=2)
if image_numpy.shape[2] == 1:
image_numpy = np.repeat(image_numpy, 3, 2)
image_pil = Image.fromarray(image_numpy)
# save to png
image_pil.save(image_path.replace('.jpg', '.png'))
def mkdirs(paths):
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
'''
alist.sort(key=natural_keys) sorts in human order
http://nedbatchelder.com/blog/200712/human_sorting.html
(See Toothy's implementation in the comments)
'''
    return [atoi(c) for c in re.split(r'(\d+)', text)]
def natural_sort(items):
items.sort(key=natural_keys)
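# Example: natural_keys('epoch_10') -> ['epoch_', 10, ''], so
# natural_sort(['epoch_10', 'epoch_2']) orders 'epoch_2' before 'epoch_10'.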
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def find_class_in_module(target_cls_name, module):
target_cls_name = target_cls_name.replace('_', '').lower()
clslib = importlib.import_module(module)
cls = None
for name, clsobj in clslib.__dict__.items():
if name.lower() == target_cls_name:
cls = clsobj
if cls is None:
print("In %s, there should be a class whose name matches %s in lowercase without underscore(_)" % (module, target_cls_name))
exit(0)
return cls
def save_network(net, label, epoch, opt):
save_filename = '%s_net_%s.pth' % (epoch, label)
save_path = os.path.join(opt.checkpoints_dir, opt.name, save_filename)
torch.save(net.cpu().state_dict(), save_path)
if len(opt.gpu_ids) and torch.cuda.is_available():
net.cuda()
def load_network(net, label, epoch, opt):
save_filename = '%s_net_%s.pth' % (epoch, label)
save_dir = os.path.join(opt.checkpoints_dir, opt.name)
save_path = os.path.join(save_dir, save_filename)
weights = torch.load(save_path)
net.load_state_dict(weights)
return net
###############################################################################
# Code from
# https://github.com/ycszen/pytorch-seg/blob/master/transform.py
# Modified so it complies with the Cityscapes label map colors
###############################################################################
def uint82bin(n, count=8):
"""returns the binary of integer n, count refers to amount of bits"""
return ''.join([str((n >> y) & 1) for y in range(count - 1, -1, -1)])
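# Example: uint82bin(5) -> '00000101' and uint82bin(5, count=4) -> '0101';
# labelcolormap below shifts and XORs these bits to spread label ids over RGB.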
def labelcolormap(N):
if N == 35: # cityscape
cmap = np.array([(0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), (111, 74, 0), (81, 0, 81),
(128, 64, 128), (244, 35, 232), (250, 170, 160), (230, 150, 140), (70, 70, 70), (102, 102, 156), (190, 153, 153),
(180, 165, 180), (150, 100, 100), (150, 120, 90), (153, 153, 153), (153, 153, 153), (250, 170, 30), (220, 220, 0),
(107, 142, 35), (152, 251, 152), (70, 130, 180), (220, 20, 60), (255, 0, 0), (0, 0, 142), (0, 0, 70),
(0, 60, 100), (0, 0, 90), (0, 0, 110), (0, 80, 100), (0, 0, 230), (119, 11, 32), (0, 0, 142)],
dtype=np.uint8)
else:
cmap = np.zeros((N, 3), dtype=np.uint8)
for i in range(N):
r, g, b = 0, 0, 0
id = i + 1 # let's give 0 a color
for j in range(7):
str_id = uint82bin(id)
r = r ^ (np.uint8(str_id[-1]) << (7 - j))
g = g ^ (np.uint8(str_id[-2]) << (7 - j))
b = b ^ (np.uint8(str_id[-3]) << (7 - j))
id = id >> 3
cmap[i, 0] = r
cmap[i, 1] = g
cmap[i, 2] = b
if N == 182: # COCO
important_colors = {
'sea': (54, 62, 167),
'sky-other': (95, 219, 255),
'tree': (140, 104, 47),
'clouds': (170, 170, 170),
'grass': (29, 195, 49)
}
for i in range(N):
name = util.coco.id2label(i)
if name in important_colors:
color = important_colors[name]
cmap[i] = np.array(list(color))
return cmap
class Colorize(object):
def __init__(self, n=35):
self.cmap = labelcolormap(n)
self.cmap = torch.from_numpy(self.cmap[:n])
def __call__(self, gray_image):
size = gray_image.size()
color_image = torch.ByteTensor(3, size[1], size[2]).fill_(0)
for label in range(0, len(self.cmap)):
mask = (label == gray_image[0]).cpu()
color_image[0][mask] = self.cmap[label][0]
color_image[1][mask] = self.cmap[label][1]
color_image[2][mask] = self.cmap[label][2]
return color_image
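# --- illustrative sketch, not part of the original utilities -----------------
# Colorizes a synthetic 35-class (Cityscapes-sized) label map via tensor2label.
# The random labels below are demo data only.
def _tensor2label_demo():
    fake_labels = torch.randint(0, 35, (1, 1, 64, 64)).float()
    color_np = tensor2label(fake_labels, n_label=35)
    # result is an H x W x 3 uint8 array, ready for save_image()
    assert color_np.shape == (64, 64, 3)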
| [
"torch.from_numpy",
"torch.ByteTensor",
"torch.cuda.is_available",
"torch.load"
] | 1.0.0 | atmacvit/meronymnet | 47e1a7caadc0f770439bb26a93b885f790f62804 |
1.0 | import re
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.networks.sync_batchnorm import SynchronizedBatchNorm2d
import torch.nn.utils.spectral_norm as spectral_norm
# Returns a function that creates a normalization function
# that does not condition on semantic map
def get_nonspade_norm_layer(opt, norm_type='instance'):
# helper function to get # output channels of the previous layer
def get_out_channel(layer):
if hasattr(layer, 'out_channels'):
return getattr(layer, 'out_channels')
return layer.weight.size(0)
# this function will be returned
def add_norm_layer(layer):
nonlocal norm_type
if norm_type.startswith('spectral'):
layer = spectral_norm(layer)
subnorm_type = norm_type[len('spectral'):]
if subnorm_type == 'none' or len(subnorm_type) == 0:
return layer
# remove bias in the previous layer, which is meaningless
# since it has no effect after normalization
if getattr(layer, 'bias', None) is not None:
delattr(layer, 'bias')
layer.register_parameter('bias', None)
if subnorm_type == 'batch':
norm_layer = nn.BatchNorm2d(get_out_channel(layer), affine=True)
elif subnorm_type == 'sync_batch':
norm_layer = SynchronizedBatchNorm2d(get_out_channel(layer), affine=True)
elif subnorm_type == 'instance':
norm_layer = nn.InstanceNorm2d(get_out_channel(layer), affine=False)
else:
raise ValueError('normalization layer %s is not recognized' % subnorm_type)
return nn.Sequential(layer, norm_layer)
return add_norm_layer
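# Hedged usage sketch: wrap a plain Conv2d with spectral norm followed by
# instance norm. `opt` is not used by this helper, so None is passed here; the
# channel sizes are arbitrary demo values.
def _nonspade_norm_demo():
    add_norm = get_nonspade_norm_layer(None, norm_type='spectralinstance')
    block = add_norm(nn.Conv2d(3, 64, kernel_size=3, padding=1))
    out = block(torch.randn(2, 3, 32, 32))
    assert out.shape == (2, 64, 32, 32)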
class SPADE(nn.Module):
def __init__(self, config_text, norm_nc, label_nc):
super().__init__()
assert config_text.startswith('spade')
        parsed = re.search(r'spade(\D+)(\d)x\d', config_text)
param_free_norm_type = str(parsed.group(1))
ks = int(parsed.group(2))
if param_free_norm_type == 'instance':
self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False)
elif param_free_norm_type == 'syncbatch':
self.param_free_norm = SynchronizedBatchNorm2d(norm_nc, affine=False)
elif param_free_norm_type == 'batch':
self.param_free_norm = nn.BatchNorm2d(norm_nc, affine=False)
else:
raise ValueError('%s is not a recognized param-free norm type in SPADE'
% param_free_norm_type)
# The dimension of the intermediate embedding space. Yes, hardcoded.
nhidden = 128
pw = ks // 2
self.mlp_shared = nn.Sequential(
nn.Conv2d(label_nc, nhidden, kernel_size=ks, padding=pw),
nn.ReLU()
)
self.mlp_gamma = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw)
self.mlp_beta = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw)
def forward(self, x, segmap):
# Part 1. generate parameter-free normalized activations
normalized = self.param_free_norm(x)
# Part 2. produce scaling and bias conditioned on semantic map
segmap = F.interpolate(segmap, size=x.size()[2:], mode='nearest')
actv = self.mlp_shared(segmap)
gamma = self.mlp_gamma(actv)
beta = self.mlp_beta(actv)
# apply scale and bias
out = normalized * (1 + gamma) + beta
return out
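# Minimal sketch of a SPADE forward pass on synthetic tensors; the config
# string 'spadeinstance3x3' and all sizes below are illustrative assumptions.
def _spade_demo():
    norm = SPADE('spadeinstance3x3', norm_nc=64, label_nc=10)
    x = torch.randn(2, 64, 16, 16)           # activations to be modulated
    segmap = torch.randn(2, 10, 128, 128)    # semantic map; resized internally
    out = norm(x, segmap)
    assert out.shape == x.shape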
| [
"torch.nn.Sequential",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.utils.spectral_norm",
"torch.nn.InstanceNorm2d"
] | 1.0.0 | atmacvit/meronymnet | 47e1a7caadc0f770439bb26a93b885f790f62804 |
1.0 | #!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json, os, random
import numpy as np  # np.uint8 is used when converting images in __getitem__
from collections import defaultdict
import torch
from torch.utils.data import Dataset
import torchvision.transforms as T
from data.preprocess import *
import PIL
from PIL import Image
from utils.data import imagenet_preprocess, Resize
from torch.utils.data import DataLoader
class CustomDataset(Dataset):
def __init__(self, image_dir, instances_json, stuff_json=None,
stuff_only=False, image_size=(64, 64), mask_size=16,
normalize_images=False, max_samples=None,
include_relationships=True, min_object_size=0.02,
min_objects_per_image=3, max_objects_per_image=8,
include_other=False, instance_whitelist=None, stuff_whitelist=None, data_mode='train'):
"""
A PyTorch Dataset for loading Coco and Coco-Stuff annotations and converting
them to scene graphs on the fly.
Inputs:
- image_dir: Path to a directory where images are held
- instances_json: Path to a JSON file giving COCO annotations
- stuff_json: (optional) Path to a JSON file giving COCO-Stuff annotations
- stuff_only: (optional, default True) If True then only iterate over
images which appear in stuff_json; if False then iterate over all images
in instances_json.
- image_size: Size (H, W) at which to load images. Default (64, 64).
- mask_size: Size M for object segmentation masks; default 16.
        - normalize_images: If True then normalize images by subtracting the ImageNet
          mean pixel and dividing by the ImageNet std pixel.
        - max_samples: If None use all images. Otherwise only use images in the
range [0, max_samples). Default None.
- include_relationships: If True then include spatial relationships; if
False then only include the trivial __in_image__ relationship.
- min_object_size: Ignore objects whose bounding box takes up less than
this fraction of the image.
- min_objects_per_image: Ignore images which have fewer than this many
object annotations.
- max_objects_per_image: Ignore images which have more than this many
object annotations.
- include_other: If True, include COCO-Stuff annotations which have category
"other". Default is False, because I found that these were really noisy
and pretty much impossible for the system to model.
- instance_whitelist: None means use all instance categories. Otherwise a
list giving a whitelist of instance category names to use.
- stuff_whitelist: None means use all stuff categories. Otherwise a list
giving a whitelist of stuff category names to use.
"""
super(Dataset, self).__init__()
class_names = ['cow', 'person', 'sheep', 'bird', 'cat', 'dog', 'horse', 'aeroplane', 'motorbike', 'bicycle']
data_dict = preprocess_make_data(class_names, data_mode)
self.data_dict = data_dict
if stuff_only and stuff_json is None:
print('WARNING: Got stuff_only=True but stuff_json=None.')
print('Falling back to stuff_only=False.')
self.image_dir = image_dir
self.mask_size = mask_size
self.max_samples = max_samples
self.normalize_images = normalize_images
self.include_relationships = include_relationships
self.set_image_size(image_size)
#with open(instances_json, 'r') as f:
# instances_data = json.load(f)
#stuff_data = None
# if stuff_json is not None and stuff_json != '':
# # with open(stuff_json, 'r') as f:
# # stuff_data = json.load(f)
#
# self.image_ids = []
# self.image_id_to_filename = {}
# self.image_id_to_size = {}
#for image_data in instances_data['images']:
# image_id = image_data['id']
# filename = image_data['file_name']
# width = image_data['width']
# height = image_data['height']
# self.image_ids.append(image_id)
# self.image_id_to_filename[image_id] = filename
# self.image_id_to_size[image_id] = (width, height)
self.vocab = {
'object_name_to_idx': {},
'pred_name_to_idx': {},
}
# object_idx_to_name = {}
# all_instance_categories = []
# for category_data in instances_data['categories']:
# category_id = category_data['id']
# category_name = category_data['name']
# all_instance_categories.append(category_name)
# object_idx_to_name[category_id] = category_name
# self.vocab['object_name_to_idx'][category_name] = category_id
all_stuff_categories = []
#if stuff_data:
# for category_data in stuff_data['categories']:
# category_name = category_data['name']
# category_id = category_data['id']
# all_stuff_categories.append(category_name)
# object_idx_to_name[category_id] = category_name
# self.vocab['object_name_to_idx'][category_name] = category_id
#if instance_whitelist is None:
# instance_whitelist = all_instance_categories
#if stuff_whitelist is None:
# stuff_whitelist = all_stuff_categories
#category_whitelist = set(instance_whitelist) | set(stuff_whitelist)
# Add object data from instances
#self.image_id_to_objects = defaultdict(list)
# for object_data in instances_data['annotations']:
# image_id = object_data['image_id']
# _, _, w, h = object_data['bbox']
# W, H = self.image_id_to_size[image_id]
# box_area = (w * h) / (W * H)
# box_ok = box_area > min_object_size
# object_name = object_idx_to_name[object_data['category_id']]
# category_ok = object_name in category_whitelist
# other_ok = object_name != 'other' or include_other
# if box_ok and category_ok and other_ok:
# self.image_id_to_objects[image_id].append(object_data)
# Add object data from stuff
#if stuff_data:
# image_ids_with_stuff = set()
# for object_data in stuff_data['annotations']:
# image_id = object_data['image_id']
# image_ids_with_stuff.add(image_id)
# _, _, w, h = object_data['bbox']
# W, H = self.image_id_to_size[image_id]
# box_area = (w * h) / (W * H)
# box_ok = box_area > min_object_size
# object_name = object_idx_to_name[object_data['category_id']]
# category_ok = object_name in category_whitelist
# other_ok = object_name != 'other' or include_other
# if box_ok and category_ok and other_ok:
# self.image_id_to_objects[image_id].append(object_data)
# if stuff_only:
# new_image_ids = []
# for image_id in self.image_ids:
# if image_id in image_ids_with_stuff:
# new_image_ids.append(image_id)
# self.image_ids = new_image_ids
# all_image_ids = set(self.image_id_to_filename.keys())
# image_ids_to_remove = all_image_ids - image_ids_with_stuff
# for image_id in image_ids_to_remove:
# self.image_id_to_filename.pop(image_id, None)
# self.image_id_to_size.pop(image_id, None)
# self.image_id_to_objects.pop(image_id, None)
# COCO category labels start at 1, so use 0 for __image__
self.vocab['object_name_to_idx']['__image__'] = 0
# Build object_idx_to_name
# name_to_idx = self.vocab['object_name_to_idx']
# assert len(name_to_idx) == len(set(name_to_idx.values()))
# max_object_idx = max(name_to_idx.values())
# idx_to_name = ['NONE'] * (1 + max_object_idx)
# for name, idx in self.vocab['object_name_to_idx'].items():
# idx_to_name[idx] = name
# self.vocab['object_idx_to_name'] = idx_to_name
# self.num_objects = len(self.vocab['object_idx_to_name'])
# Prune images that have too few or too many objects
# new_image_ids = []
# total_objs = 0
# for image_id in self.image_ids:
# num_objs = len(self.image_id_to_objects[image_id])
# total_objs += num_objs
# if min_objects_per_image <= num_objs <= max_objects_per_image:
# new_image_ids.append(image_id)
# self.image_ids = new_image_ids
# self.vocab['pred_idx_to_name'] = [
# '__in_image__',
# 'left of',
# 'right of',
# 'above',
# 'below',
# 'inside',
# 'surrounding',
# ]
# self.vocab['pred_name_to_idx'] = {}
# for idx, name in enumerate(self.vocab['pred_idx_to_name']):
# self.vocab['pred_name_to_idx'][name] = idx
self.num_objects=25
def set_image_size(self, image_size):
print('called set_image_size', image_size)
transform = [Resize(image_size), T.ToTensor()]
if self.normalize_images:
transform.append(imagenet_preprocess())
self.transform = T.Compose(transform)
self.image_size = image_size
def total_objects(self):
total_objs = 0
for id in self.data_dict.keys():
total_objs = total_objs + self.data_dict[id]['boxes'].shape[0]
return total_objs
# for i, image_id in enumerate(self.image_ids):
# if self.max_samples and i >= self.max_samples:
# break
# num_objs = len(self.image_id_to_objects[image_id])
# # total_objs += num_objs
# # return total_objs
def __len__(self):
# if self.max_samples is None:
# return len(self.image_ids)
# return min(len(self.image_ids), self.max_samples)
return len(self.data_dict.keys())
def __getitem__(self, index):
"""
Get the pixels of an image, and a random synthetic scene graph for that
image constructed on-the-fly from its COCO object annotations. We assume
that the image will have height H, width W, C channels; there will be O
object annotations, each of which will have both a bounding box and a
segmentation mask of shape (M, M). There will be T triples in the scene
graph.
Returns a tuple of:
- image: FloatTensor of shape (C, H, W)
- objs: LongTensor of shape (O,)
- boxes: FloatTensor of shape (O, 4) giving boxes for objects in
(x0, y0, x1, y1) format, in a [0, 1] coordinate system
- masks: LongTensor of shape (O, M, M) giving segmentation masks for
objects, where 0 is background and 1 is object.
- triples: LongTensor of shape (T, 3) where triples[t] = [i, p, j]
means that (objs[i], p, objs[j]) is a triple.
"""
image_id = str(index)
# filename = self.image_id_to_filename[image_id]
# image_path = os.path.join(self.image_dir, filename)
#with open(image_path, 'rb') as f:
# with PIL.Image.open(f) as image:
# WW, HH = image.size
# image = self.transform(image.convert('RGB'))
img = self.data_dict[image_id]['image']
img = Image.fromarray(np.uint8(img))
#img.save(image_id + '_.png')
WW, HH = img.size
image = self.transform(img)
bbxs = self.data_dict[image_id]['boxes'].tolist()
class_name = self.data_dict[image_id]['class']
H, W = self.image_size
objs, boxes, masks, obj_to_cls = [], [], [], []
for bbx in bbxs:
x0, y0, x1, y1 = bbx
#x0 = x0 * 550.0/ WW
#y0 = y0 * 550.0/ HH
#x1 = x1 * 550.0/ WW
#y1 = y1 * 550.0/ HH
boxes.append(torch.FloatTensor([x0, y0, x1, y1]))
# This will give a numpy array of shape (HH, WW)
mask = torch.zeros(1, H, W)
# mask = seg_to_mask(object_data['segmentation'], WW, HH)
mask[:, round(y0 * H):max(round(y0 * H)+1, round(y1 * H)), round(x0 * W):max(round(x0 * W)+1, round(x1 * W))] = 1
# Crop the mask according to the bounding box, being careful to
# ensure that we don't crop a zero-area region
# mx0, mx1 = int(round(x)), int(round(x + w))
# my0, my1 = int(round(y)), int(round(y + h))
# mx1 = max(mx0 + 1, mx1)
# my1 = max(my0 + 1, my1)
# mask = mask[my0:my1, mx0:mx1]
# mask = imresize(255 * mask, (self.mask_size, self.mask_size),
# mode='constant')
# mask = torch.from_numpy((mask > 128).astype(np.int64))
masks.append(mask)
obj_to_cls.append(torch.from_numpy(class_name))
for obj in self.data_dict[image_id]['labels']:
objs.append(obj)
# Add dummy __image__ object
# objs.append(self.vocab['object_name_to_idx']['__image__'])
# boxes.append(torch.FloatTensor([0, 0, 1, 1]))
# masks.append(torch.ones(self.mask_size, self.mask_size).long())
print(objs)
# shuffle objs
O = len(objs)
rand_idx = list(range(O))
random.shuffle(rand_idx)
objs = [objs[i] for i in rand_idx]
boxes = [boxes[i] for i in rand_idx]
masks = [masks[i] for i in rand_idx]
obj_to_cls = [obj_to_cls[i] for i in rand_idx]
objs = torch.LongTensor(objs)
boxes = torch.stack(boxes, dim=0)
masks = torch.stack(masks, dim=0)
obj_to_cls = torch.stack(obj_to_cls, dim=0)
# box_areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
# Compute centers of all objects
# obj_centers = []
# _, MH, MW = masks.size()
# for i, obj_idx in enumerate(objs):
# x0, y0, x1, y1 = boxes[i]
# mask = (masks[i] == 1)
# xs = torch.linspace(x0, x1, MW).view(1, MW).expand(MH, MW)
# ys = torch.linspace(y0, y1, MH).view(MH, 1).expand(MH, MW)
# if mask.sum() == 0:
# mean_x = 0.5 * (x0 + x1)
# mean_y = 0.5 * (y0 + y1)
# else:
# mean_x = xs[mask].mean()
# mean_y = ys[mask].mean()
# obj_centers.append([mean_x, mean_y])
# obj_centers = torch.FloatTensor(obj_centers)
#
# # Add triples
# triples = []
# num_objs = objs.size(0)
# __image__ = self.vocab['object_name_to_idx']['__image__']
# real_objs = []
# if num_objs > 1:
# real_objs = (objs != __image__).nonzero().squeeze(1)
# for cur in real_objs:
# choices = [obj for obj in real_objs if obj != cur]
# if len(choices) == 0 or not self.include_relationships:
# break
# other = random.choice(choices)
# if random.random() > 0.5:
# s, o = cur, other
# else:
# s, o = other, cur
#
# # Check for inside / surrounding
# sx0, sy0, sx1, sy1 = boxes[s]
# ox0, oy0, ox1, oy1 = boxes[o]
# d = obj_centers[s] - obj_centers[o]
# theta = math.atan2(d[1], d[0])
#
# if sx0 < ox0 and sx1 > ox1 and sy0 < oy0 and sy1 > oy1:
# p = 'surrounding'
# elif sx0 > ox0 and sx1 < ox1 and sy0 > oy0 and sy1 < oy1:
# p = 'inside'
# elif theta >= 3 * math.pi / 4 or theta <= -3 * math.pi / 4:
# p = 'left of'
# elif -3 * math.pi / 4 <= theta < -math.pi / 4:
# p = 'above'
# elif -math.pi / 4 <= theta < math.pi / 4:
# p = 'right of'
# elif math.pi / 4 <= theta < 3 * math.pi / 4:
# p = 'below'
# p = self.vocab['pred_name_to_idx'][p]
# triples.append([s, p, o])
# Add __in_image__ triples
# in_image = self.vocab['pred_name_to_idx']['__in_image__']
# for i in range(O - 1):
# triples.append([i, in_image, O - 1])
#
# triples = torch.LongTensor(triples)
return image, objs, boxes, masks, obj_to_cls, torch.from_numpy(class_name)
# def seg_to_mask(seg, width=1.0, height=1.0):
# """
# Tiny utility for decoding segmentation masks using the pycocotools API.
# """
# if type(seg) == list:
# rles = mask_utils.frPyObjects(seg, height, width)
# rle = mask_utils.merge(rles)
# elif type(seg['counts']) == list:
# rle = mask_utils.frPyObjects(seg, height, width)
# else:
# rle = seg
# return mask_utils.decode(rle)
def custom_collate_fn(batch):
"""
Collate function to be used when wrapping CocoSceneGraphDataset in a
DataLoader. Returns a tuple of the following:
- imgs: FloatTensor of shape (N, C, H, W)
- objs: LongTensor of shape (O,) giving object categories
- boxes: FloatTensor of shape (O, 4)
- masks: FloatTensor of shape (O, M, M)
- triples: LongTensor of shape (T, 3) giving triples
- obj_to_img: LongTensor of shape (O,) mapping objects to images
- triple_to_img: LongTensor of shape (T,) mapping triples to images
"""
all_imgs, all_classes, all_objs, all_boxes, all_masks, all_obj_to_cls, all_obj_to_img = [], [], [], [], [], [], []
for i, (img, objs, boxes, masks, obj_to_cls, class_name) in enumerate(batch):
all_imgs.append(img[None])
O = objs.size(0)
all_objs.append(objs)
all_boxes.append(boxes)
all_masks.append(masks)
all_classes.append(class_name[None])
all_obj_to_cls.append(obj_to_cls)
all_obj_to_img.append(torch.LongTensor(O).fill_(i))
all_imgs = torch.cat(all_imgs)
all_objs = torch.cat(all_objs)
all_boxes = torch.cat(all_boxes)
all_masks = torch.cat(all_masks)
all_classes = torch.cat(all_classes)
all_obj_to_cls = torch.cat(all_obj_to_cls)
all_obj_to_img = torch.cat(all_obj_to_img)
out = (all_imgs, all_objs, all_boxes, all_masks, all_classes, all_obj_to_cls, all_obj_to_img)
print("loader obj to class " + str(all_obj_to_cls.shape))
print("loader class shape" + str((all_classes).shape))
return out
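# Hedged sketch of the per-image tuples custom_collate_fn expects; every tensor
# below is a synthetic placeholder with plausible shapes (10 object classes).
def _custom_collate_demo():
    img = torch.zeros(3, 128, 128)
    objs = torch.LongTensor([1, 2])
    boxes = torch.rand(2, 4)
    masks = torch.zeros(2, 1, 128, 128)
    obj_to_cls = torch.zeros(2, 10)
    class_name = torch.zeros(10)
    batch = [(img, objs, boxes, masks, obj_to_cls, class_name)] * 2
    imgs, objs, boxes, masks, classes, obj_to_cls, obj_to_img = custom_collate_fn(batch)
    # objects of image 0 come first, then objects of image 1
    assert imgs.shape == (2, 3, 128, 128) and obj_to_img.tolist() == [0, 0, 1, 1]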
def get_dataloader(batch_size=10, CUSTOM_DIR='/home/zhaobo/Data/coco', instance_whitelist=None, stuff_whitelist=None, coco_include_other=False):
coco_train_image_dir = None#os.path.join(COCO_DIR, 'images/val2017')
coco_val_image_dir = None#os.path.join(COCO_DIR, 'images/val2017')
coco_train_instances_json = None#os.path.join(COCO_DIR, 'annotations/instances_val2017.json')
coco_train_stuff_json = None#os.path.join(COCO_DIR, 'annotations/stuff_train2017.json')
coco_val_instances_json = None#os.path.join(COCO_DIR, 'annotations/instances_val2017.json')
coco_val_stuff_json = None#os.path.join(COCO_DIR, 'annotations/stuff_val2017.json')
min_object_size = 0.02
min_objects_per_image = 3
coco_stuff_only = False
mask_size = 32
image_size = (128, 128)
num_train_samples = None
num_val_samples = None
include_relationships = False
batch_size = batch_size
shuffle_val = False
# build datasets
dset_kwargs = {
'image_dir': coco_train_image_dir,
'instances_json': coco_train_instances_json,
'stuff_json': coco_train_stuff_json,
'stuff_only': coco_stuff_only,
'image_size': image_size,
'mask_size': mask_size,
'max_samples': num_train_samples,
'min_object_size': min_object_size,
'min_objects_per_image': min_objects_per_image,
'instance_whitelist': instance_whitelist,
'stuff_whitelist': stuff_whitelist,
'include_other': coco_include_other,
'include_relationships': include_relationships,
'data_mode': 'train',
}
train_dset = CustomDataset(**dset_kwargs)
num_objs = train_dset.total_objects()
num_imgs = len(train_dset)
print('Training dataset has %d images and %d objects' % (num_imgs, num_objs))
print('(%.2f objects per image)' % (float(num_objs) / num_imgs))
dset_kwargs['image_dir'] = coco_val_image_dir
dset_kwargs['instances_json'] = coco_val_instances_json
dset_kwargs['stuff_json'] = coco_val_stuff_json
dset_kwargs['max_samples'] = num_val_samples
dset_kwargs['data_mode'] = 'test'
val_dset = CustomDataset(**dset_kwargs)
#assert train_dset.vocab == val_dset.vocab
#vocab = json.loads(json.dumps(train_dset.vocab))
# build dataloader
loader_kwargs = {
'batch_size': batch_size,
'num_workers': 1,
'shuffle': True,
'collate_fn': custom_collate_fn,
}
train_loader = DataLoader(train_dset, **loader_kwargs)
loader_kwargs['shuffle'] = shuffle_val
loader_kwargs['num_workers'] = 1
val_loader = DataLoader(val_dset, **loader_kwargs)
return train_loader, val_loader
if __name__ == '__main__':
train_loader, val_loader = get_dataloader(batch_size=32)
# test reading data
for i, batch in enumerate(train_loader):
        imgs, objs, boxes, masks, classes, obj_to_cls, obj_to_img = batch  # custom_collate_fn returns 7 tensors
        print(imgs.shape, objs.shape, boxes.shape, masks.shape, classes.shape, obj_to_cls.shape, obj_to_img.shape)
if i == 20: break
| [
"torch.zeros",
"torch.cat",
"torch.stack",
"torch.FloatTensor",
"torch.from_numpy",
"torch.LongTensor",
"torch.utils.data.DataLoader"
] | 1.0.0 | atmacvit/meronymnet | 47e1a7caadc0f770439bb26a93b885f790f62804 |
1.0 | import json, os, random, math
from collections import defaultdict
import torch
from torch.utils.data import Dataset
import torchvision.transforms as T
import numpy as np
import PIL
from skimage.transform import resize as imresize
import pycocotools.mask as mask_utils
from random import shuffle
from data.preprocess import *
from PIL import Image
class CustomDataset(Dataset):
def __init__(self, image_dir, instances_json, stuff_json=None,
stuff_only=True, image_size=(64, 64), mask_size=16,
normalize_images=False, max_samples=None,
include_relationships=True, min_object_size=0.02,
min_objects_per_image=3, max_objects_per_image=25, left_right_flip=False,
include_other=False, instance_whitelist=None, stuff_whitelist=None, data_mode='train', classes=None):
"""
A PyTorch Dataset for loading Coco and Coco-Stuff annotations and converting
them to scene graphs on the fly.
Inputs:
- image_dir: Path to a directory where images are held
- instances_json: Path to a JSON file giving COCO annotations
- stuff_json: (optional) Path to a JSON file giving COCO-Stuff annotations
- stuff_only: (optional, default True) If True then only iterate over
images which appear in stuff_json; if False then iterate over all images
in instances_json.
- image_size: Size (H, W) at which to load images. Default (64, 64).
- mask_size: Size M for object segmentation masks; default 16.
        - normalize_images: If True then normalize images by subtracting the ImageNet
          mean pixel and dividing by the ImageNet std pixel.
        - max_samples: If None use all images. Otherwise only use images in the
range [0, max_samples). Default None.
- include_relationships: If True then include spatial relationships; if
False then only include the trivial __in_image__ relationship.
- min_object_size: Ignore objects whose bounding box takes up less than
this fraction of the image.
- min_objects_per_image: Ignore images which have fewer than this many
object annotations.
- max_objects_per_image: Ignore images which have more than this many
object annotations.
- include_other: If True, include COCO-Stuff annotations which have category
"other". Default is False, because I found that these were really noisy
and pretty much impossible for the system to model.
- instance_whitelist: None means use all instance categories. Otherwise a
list giving a whitelist of instance category names to use.
- stuff_whitelist: None means use all stuff categories. Otherwise a list
giving a whitelist of stuff category names to use.
"""
super(Dataset, self).__init__()
        class_names = classes if classes != ['all'] else ['cow', 'person', 'sheep', 'bird', 'cat', 'dog', 'horse', 'aeroplane', 'motorbike', 'bicycle']  # 'is not [...]' checked identity and was always True
data_dict = preprocess_make_data(class_names, data_mode)
self.data_dict = data_dict
if stuff_only and stuff_json is None:
print('WARNING: Got stuff_only=True but stuff_json=None.')
print('Falling back to stuff_only=False.')
self.image_dir = image_dir
self.mask_size = mask_size
self.max_samples = max_samples
self.max_objects_per_image = max_objects_per_image
self.normalize_images = normalize_images
self.include_relationships = include_relationships
self.left_right_flip = left_right_flip
self.set_image_size(image_size)
# with open(instances_json, 'r') as f:
# instances_data = json.load(f)
stuff_data = None
# if stuff_json is not None and stuff_json != '':
# with open(stuff_json, 'r') as f:
# stuff_data = json.load(f)
# self.image_ids = []
# self.image_id_to_filename = {}
# self.image_id_to_size = {}
# for image_data in instances_data['images']:
# image_id = image_data['id']
# filename = image_data['file_name']
# width = image_data['width']
# height = image_data['height']
# self.image_ids.append(image_id)
# self.image_id_to_filename[image_id] = filename
# self.image_id_to_size[image_id] = (width, height)
self.vocab = {
'object_name_to_idx': {},
'pred_name_to_idx': {},
}
# COCO category labels start at 1, so use 0 for __image__
self.vocab['object_name_to_idx']['__image__'] = 0
# Build object_idx_to_name
# name_to_idx = self.vocab['object_name_to_idx']
# assert len(name_to_idx) == len(set(name_to_idx.values()))
# max_object_idx = max(name_to_idx.values())
# idx_to_name = ['NONE'] * (1 + max_object_idx)
# for name, idx in self.vocab['object_name_to_idx'].items():
# idx_to_name[idx] = name
# self.vocab['object_idx_to_name'] = idx_to_name
# Prune images that have too few or too many objects
# new_image_ids = []
# total_objs = 0
# for image_id in self.image_ids:
# num_objs = len(self.image_id_to_objects[image_id])
# total_objs += num_objs
# if min_objects_per_image <= num_objs <= max_objects_per_image:
# new_image_ids.append(image_id)
# self.image_ids = new_image_ids
# self.vocab['pred_idx_to_name'] = [
# '__in_image__',
# 'left of',
# 'right of',
# 'above',
# 'below',
# 'inside',
# 'surrounding',
# ]
# self.vocab['pred_name_to_idx'] = {}
# for idx, name in enumerate(self.vocab['pred_idx_to_name']):
# self.vocab['pred_name_to_idx'][name] = idx
#self.max_objects_per_image = 25
def set_image_size(self, image_size):
print('called set_image_size', image_size)
transform = [Resize(image_size), T.ToTensor()]
if self.normalize_images:
transform.append(imagenet_preprocess())
self.transform = T.Compose(transform)
self.image_size = image_size
def total_objects(self):
total_objs = 0
for id in self.data_dict.keys():
total_objs = total_objs + self.data_dict[id]['boxes'].shape[0]
return total_objs
def __len__(self):
return len(self.data_dict.keys())
def __getitem__(self, index):
"""
Get the pixels of an image, and a random synthetic scene graph for that
image constructed on-the-fly from its COCO object annotations. We assume
that the image will have height H, width W, C channels; there will be O
object annotations, each of which will have both a bounding box and a
segmentation mask of shape (M, M). There will be T triples in the scene
graph.
Returns a tuple of:
- image: FloatTensor of shape (C, H, W)
- objs: LongTensor of shape (O,)
- boxes: FloatTensor of shape (O, 4) giving boxes for objects in
(x0, y0, x1, y1) format, in a [0, 1] coordinate system
- masks: LongTensor of shape (O, M, M) giving segmentation masks for
objects, where 0 is background and 1 is object.
- triples: LongTensor of shape (T, 3) where triples[t] = [i, p, j]
means that (objs[i], p, objs[j]) is a triple.
"""
image_id = str(index)
img = self.data_dict[image_id]['image']
#flip = False
#if index >= len(self.image_ids):
# index = index - len(self.image_ids)
# flip = True
#image_id = self.image_ids[index]
#filename = self.image_id_to_filename[image_id]
#image_path = os.path.join(self.image_dir, filename)
#with open(image_path, 'rb') as f:
# with PIL.Image.open(f) as image:
# if flip:
# image = PIL.ImageOps.mirror(image)
# WW, HH = image.size
# image = self.transform(image.convert('RGB'))
img = Image.fromarray(np.uint8(img))
WW, HH = img.size
image = self.transform(img)
bbxs = self.data_dict[image_id]['boxes'].tolist()
class_name = self.data_dict[image_id]['class']
H, W = self.image_size
objs, boxes, masks = [], [], []
for bbx in bbxs:
x0, y0, x1, y1 = bbx
#x0 = x0 * 550.0/WW
#y0 = y0 * 550.0/HH
#x1 = x1 * 550.0/WW
#y1 = y1 * 550.0/HH
x1 = (x1*550.0 - x0*550.0 + 1.0)/550.0
y1 = (y1*550.0 - y0*550.0 + 1.0)/550.0
boxes.append(torch.FloatTensor([x0, y0, x1, y1]))
#boxes.append(torch.FloatTensor([0.0, 0.0, 1.0, 1.0]))
# This will give a numpy array of shape (HH, WW)
#mask = torch.zeros(1, H, W)
# mask = seg_to_mask(object_data['segmentation'], WW, HH)
#mask[:, round(y0 * H):max(round(y0 * H)+1, round(y1 * H)), round(x0 * W):max(round(x0 * W)+1, round(x1 * W))] = 1
# Crop the mask according to the bounding box, being careful to
# ensure that we don't crop a zero-area region
# mx0, mx1 = int(round(x)), int(round(x + w))
# my0, my1 = int(round(y)), int(round(y + h))
# mx1 = max(mx0 + 1, mx1)
# my1 = max(my0 + 1, my1)
# mask = mask[my0:my1, mx0:mx1]
# mask = imresize(255 * mask, (self.mask_size, self.mask_size),
# mode='constant')
#mask = torch.from_numpy((mask > 128).astype(np.int64))
#masks.append(mask)
#obj_to_cls.append(torch.from_numpy(class_name))
for obj in self.data_dict[image_id]['labels']:
objs.append(obj)
# obj_masks = []
#for object_data in self.image_id_to_objects[image_id]:
# objs.append(object_data['category_id'])
# x, y, w, h = object_data['bbox']
# x0 = x / WW
# y0 = y / HH
# x1 = (w) / WW
# y1 = (h) / HH
# if flip:
# x0 = 1 - (x0 + x1)
# boxes.append(np.array([x0, y0, x1, y1]))
# # This will give a numpy array of shape (HH, WW)
# mask = seg_to_mask(object_data['segmentation'], WW, HH)
# if flip:
# mask = mask[:, ::-1]
# x = WW - x - w
# # Crop the mask according to the bounding box, being careful to
# # ensure that we don't crop a zero-area region
# # mx0, mx1 = int(round(x)), int(round(x + w))
# # my0, my1 = int(round(y)), int(round(y + h))
# # mx1 = max(mx0 + 1, mx1)
# # my1 = max(my0 + 1, my1)
# # obj_mask = mask[my0:my1, mx0:mx1]
# # obj_mask = imresize(255.0 * obj_mask, (self.mask_size, self.mask_size),
# # mode='constant')
# # obj_mask = torch.from_numpy((obj_mask > 128).astype(np.int64))
# # obj_masks.append(obj_mask)
#
# mask = imresize(255.0 * mask, (self.image_size[0], self.image_size[1]),
# mode='constant')
# mask = torch.from_numpy((mask > 128).astype(np.int64))
# masks.append(mask)
# shuffle(objs)
# shuffle(boxes)
# shuffle(masks)
# Add dummy __image__ object
# objs.append(184)
# boxes.append(torch.FloatTensor([0, 0, 1, 1]))
# masks.append(torch.ones(self.mask_size, self.mask_size).long())
# add 0 for number of objects
for _ in range(len(objs), self.max_objects_per_image):
objs.append(self.vocab['object_name_to_idx']['__image__'])
boxes.append(torch.FloatTensor([0.0, 0.0, 1.0, 1.0]))
# masks.append(torch.zeros((self.image_size[0], self.image_size[1])).long())
# obj_masks.append(torch.zeros((self.mask_size, self.mask_size)).long())
#objs = torch.LongTensor(objs)
#boxes = np.vstack(boxes)
#O = len(objs)
#rand_idx = list(range(O))
#random.shuffle(rand_idx)
#objs = [objs[i] for i in rand_idx]
#boxes = [boxes[i] for i in rand_idx]
objs = torch.LongTensor(objs)
boxes = torch.stack(boxes, dim=0)
# masks = torch.stack(masks, dim=0)
# obj_masks = torch.stack(obj_masks, dim=0)
# b_map = self.get_bbox_map_p(boxes)
# box_areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
# Compute centers of all objects
"""
obj_centers = []
_, MH, MW = masks.size()
for i, obj_idx in enumerate(objs):
x0, y0, x1, y1 = boxes[i]
mask = (masks[i] == 1)
xs = torch.linspace(x0, x1, MW).view(1, MW).expand(MH, MW)
ys = torch.linspace(y0, y1, MH).view(MH, 1).expand(MH, MW)
if mask.sum() == 0:
mean_x = 0.5 * (x0 + x1)
mean_y = 0.5 * (y0 + y1)
else:
mean_x = xs[mask].mean()
mean_y = ys[mask].mean()
obj_centers.append([mean_x, mean_y])
obj_centers = torch.FloatTensor(obj_centers)
# Add triples
triples = []
num_objs = objs.size(0)
__image__ = self.vocab['object_name_to_idx']['__image__']
real_objs = []
if num_objs > 1:
real_objs = (objs != __image__).nonzero().squeeze(1)
for cur in real_objs:
choices = [obj for obj in real_objs if obj != cur]
if len(choices) == 0 or not self.include_relationships:
break
other = random.choice(choices)
if random.random() > 0.5:
s, o = cur, other
else:
s, o = other, cur
# Check for inside / surrounding
sx0, sy0, sx1, sy1 = boxes[s]
ox0, oy0, ox1, oy1 = boxes[o]
d = obj_centers[s] - obj_centers[o]
theta = math.atan2(d[1], d[0])
if sx0 < ox0 and sx1 > ox1 and sy0 < oy0 and sy1 > oy1:
p = 'surrounding'
elif sx0 > ox0 and sx1 < ox1 and sy0 > oy0 and sy1 < oy1:
p = 'inside'
elif theta >= 3 * math.pi / 4 or theta <= -3 * math.pi / 4:
p = 'left of'
elif -3 * math.pi / 4 <= theta < -math.pi / 4:
p = 'above'
elif -math.pi / 4 <= theta < math.pi / 4:
p = 'right of'
elif math.pi / 4 <= theta < 3 * math.pi / 4:
p = 'below'
p = self.vocab['pred_name_to_idx'][p]
triples.append([s, p, o])
# Add __in_image__ triples
O = objs.size(0)
in_image = self.vocab['pred_name_to_idx']['__in_image__']
for i in range(O - 1):
triples.append([i, in_image, O - 1])
"""
cls = torch.from_numpy(class_name)
print("Image {}".format(image.shape))
print("Labels {}".format(objs.shape))
print("Bbox {}".format(boxes.shape))
print("Class {}".format(cls.shape))
# triples = torch.LongTensor(triples)
        return image, objs, boxes, cls  # b_map / obj_masks / masks / triples are disabled above
def get_bbox_map_p(self, bbox):
mapping = np.zeros((len(bbox), self.image_size[0], self.image_size[0]))
for idx in range(self.max_objects_per_image):
if min(bbox[idx]) < 0:
continue
line_space = np.linspace(0, self.image_size[0]-1, num=self.image_size[0])
xv, yv = np.meshgrid(line_space, line_space)
mapping[idx][(xv < int((bbox[idx][0] + bbox[idx][2]) * self.image_size[0])) * (xv > int(bbox[idx][0] * self.image_size[0])) *
(yv < int((bbox[idx][1] + bbox[idx][3]) * self.image_size[0])) * (yv > int(bbox[idx][1] * self.image_size[0]))] = 1
return mapping
def seg_to_mask(seg, width=1.0, height=1.0):
"""
Tiny utility for decoding segmentation masks using the pycocotools API.
"""
if type(seg) == list:
rles = mask_utils.frPyObjects(seg, height, width)
rle = mask_utils.merge(rles)
elif type(seg['counts']) == list:
rle = mask_utils.frPyObjects(seg, height, width)
else:
rle = seg
return mask_utils.decode(rle)
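# Hedged example of seg_to_mask on a COCO-style polygon; the triangle below is
# made-up demo geometry, used only to show the decoded binary mask.
def _seg_to_mask_demo():
    polygon = [[10.0, 10.0, 40.0, 10.0, 25.0, 40.0]]   # one polygon as xy pairs
    mask = seg_to_mask(polygon, width=64, height=64)
    assert mask.shape == (64, 64) and mask.max() == 1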
def coco_collate_fn(batch):
"""
Collate function to be used when wrapping CocoSceneGraphDataset in a
DataLoader. Returns a tuple of the following:
- imgs: FloatTensor of shape (N, C, H, W)
- objs: LongTensor of shape (O,) giving object categories
- boxes: FloatTensor of shape (O, 4)
- masks: FloatTensor of shape (O, M, M)
- triples: LongTensor of shape (T, 3) giving triples
- obj_to_img: LongTensor of shape (O,) mapping objects to images
- triple_to_img: LongTensor of shape (T,) mapping triples to images
"""
all_imgs, all_objs, all_boxes, all_masks, all_triples = [], [], [], [], []
all_obj_to_img, all_triple_to_img = [], []
obj_offset = 0
for i, (img, objs, boxes, masks, triples) in enumerate(batch):
all_imgs.append(img[None])
if objs.dim() == 0 or triples.dim() == 0:
continue
O, T = objs.size(0), triples.size(0)
all_objs.append(objs)
all_boxes.append(boxes)
all_masks.append(masks)
triples = triples.clone()
triples[:, 0] += obj_offset
triples[:, 2] += obj_offset
all_triples.append(triples)
all_obj_to_img.append(torch.LongTensor(O).fill_(i))
all_triple_to_img.append(torch.LongTensor(T).fill_(i))
obj_offset += O
all_imgs = torch.cat(all_imgs)
all_objs = torch.cat(all_objs)
all_boxes = torch.cat(all_boxes)
all_masks = torch.cat(all_masks)
all_triples = torch.cat(all_triples)
all_obj_to_img = torch.cat(all_obj_to_img)
all_triple_to_img = torch.cat(all_triple_to_img)
out = (all_imgs, all_objs, all_boxes, all_masks, all_triples,
all_obj_to_img, all_triple_to_img)
return out
# IMAGENET_MEAN = [0.485, 0.456, 0.406]
# IMAGENET_STD = [0.229, 0.224, 0.225]
IMAGENET_MEAN = [0.5, 0.5, 0.5]
IMAGENET_STD = [0.5, 0.5, 0.5]
INV_IMAGENET_MEAN = [-m for m in IMAGENET_MEAN]
INV_IMAGENET_STD = [1.0 / s for s in IMAGENET_STD]
def imagenet_preprocess():
return T.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD)
def rescale(x):
lo, hi = x.min(), x.max()
return x.sub(lo).div(hi - lo)
def imagenet_deprocess(rescale_image=True):
transforms = [
T.Normalize(mean=[0, 0, 0], std=INV_IMAGENET_STD),
T.Normalize(mean=INV_IMAGENET_MEAN, std=[1.0, 1.0, 1.0]),
]
if rescale_image:
transforms.append(rescale)
return T.Compose(transforms)
def imagenet_deprocess_batch(imgs, rescale=True):
"""
Input:
- imgs: FloatTensor of shape (N, C, H, W) giving preprocessed images
Output:
- imgs_de: ByteTensor of shape (N, C, H, W) giving deprocessed images
in the range [0, 255]
"""
if isinstance(imgs, torch.autograd.Variable):
imgs = imgs.data
imgs = imgs.cpu().clone()
deprocess_fn = imagenet_deprocess(rescale_image=rescale)
imgs_de = []
for i in range(imgs.size(0)):
img_de = deprocess_fn(imgs[i])[None]
img_de = img_de.mul(255).clamp(0, 255).byte()
imgs_de.append(img_de)
imgs_de = torch.cat(imgs_de, dim=0)
return imgs_de
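# Small sketch: turn normalized model outputs back into uint8 images for saving.
# The random batch stands in for generator output.
def _imagenet_deprocess_batch_demo():
    imgs = torch.randn(4, 3, 64, 64)
    imgs_de = imagenet_deprocess_batch(imgs)
    assert imgs_de.dtype == torch.uint8 and imgs_de.shape == (4, 3, 64, 64)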
class Resize(object):
def __init__(self, size, interp=PIL.Image.BILINEAR):
if isinstance(size, tuple):
H, W = size
self.size = (W, H)
else:
self.size = (size, size)
self.interp = interp
def __call__(self, img):
return img.resize(self.size, self.interp)
def unpack_var(v):
if isinstance(v, torch.autograd.Variable):
return v.data
return v
def split_graph_batch(triples, obj_data, obj_to_img, triple_to_img):
triples = unpack_var(triples)
obj_data = [unpack_var(o) for o in obj_data]
obj_to_img = unpack_var(obj_to_img)
triple_to_img = unpack_var(triple_to_img)
triples_out = []
obj_data_out = [[] for _ in obj_data]
obj_offset = 0
N = obj_to_img.max() + 1
for i in range(N):
o_idxs = (obj_to_img == i).nonzero().view(-1)
t_idxs = (triple_to_img == i).nonzero().view(-1)
cur_triples = triples[t_idxs].clone()
cur_triples[:, 0] -= obj_offset
cur_triples[:, 2] -= obj_offset
triples_out.append(cur_triples)
for j, o_data in enumerate(obj_data):
cur_o_data = None
if o_data is not None:
cur_o_data = o_data[o_idxs]
obj_data_out[j].append(cur_o_data)
obj_offset += o_idxs.size(0)
return triples_out, obj_data_out
| [
"torch.cat",
"torch.stack",
"torch.FloatTensor",
"torch.from_numpy",
"torch.LongTensor"
] | 1.0.0 | atmacvit/meronymnet | 47e1a7caadc0f770439bb26a93b885f790f62804 |
1.0 | # coding=utf-8
# Copyright 2021 The UCLA NLP Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch VisualBERT model. """
import math
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss, KLDivLoss, LogSoftmax
from ...activations import ACT2FN
from ...file_utils import (
ModelOutput,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPooling,
MultipleChoiceModelOutput,
SequenceClassifierOutput,
)
from ...modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from ...utils import logging
from .configuration_visual_bert import VisualBertConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "VisualBertConfig"
_CHECKPOINT_FOR_DOC = "uclanlp/visualbert-vqa-coco-pre"
VISUAL_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"uclanlp/visualbert-vqa",
"uclanlp/visualbert-vqa-pre",
"uclanlp/visualbert-vqa-coco-pre",
"uclanlp/visualbert-vcr",
"uclanlp/visualbert-vcr-pre",
"uclanlp/visualbert-vcr-coco-pre",
"uclanlp/visualbert-nlvr2",
"uclanlp/visualbert-nlvr2-pre",
"uclanlp/visualbert-nlvr2-coco-pre"
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
]
class VisualBertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings and visual embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
# For Visual Features
# Token type and position embedding for image features
self.visual_token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.visual_position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
if config.special_visual_initialize:
self.visual_token_type_embeddings.weight.data = nn.Parameter(
self.token_type_embeddings.weight.data.clone(), requires_grad=True
)
self.visual_position_embeddings.weight.data = nn.Parameter(
self.position_embeddings.weight.data.clone(), requires_grad=True
)
self.visual_projection = nn.Linear(config.visual_embedding_dim, config.hidden_size)
def forward(
self,
input_ids=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
visual_embeds=None,
visual_token_type_ids=None,
image_text_alignment=None,
):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
# Absolute Position Embeddings
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
if visual_embeds is not None:
if visual_token_type_ids is None:
visual_token_type_ids = torch.ones(
visual_embeds.size()[:-1], dtype=torch.long, device=self.position_ids.device
)
visual_embeds = self.visual_projection(visual_embeds)
visual_token_type_embeddings = self.visual_token_type_embeddings(visual_token_type_ids)
if image_text_alignment is not None:
# image_text_alignment = Batch x image_length x alignment_number.
# Each element denotes the position of the word corresponding to the image feature. -1 is the padding value.
dtype = token_type_embeddings.dtype
image_text_alignment_mask = (image_text_alignment != -1).long()
# Get rid of the -1.
image_text_alignment = image_text_alignment_mask * image_text_alignment
# Batch x image_length x alignment length x dim
visual_position_embeddings = self.position_embeddings(image_text_alignment)
visual_position_embeddings *= image_text_alignment_mask.to(dtype=dtype).unsqueeze(-1)
visual_position_embeddings = visual_position_embeddings.sum(2)
                # We want to average along the alignment_number dimension.
image_text_alignment_mask = image_text_alignment_mask.to(dtype=dtype).sum(2)
if (image_text_alignment_mask == 0).sum() != 0:
image_text_alignment_mask[image_text_alignment_mask == 0] = 1 # Avoid divide by zero error
logger.warning(
"Found 0 values in `image_text_alignment_mask`. Setting them to 1 to avoid divide-by-zero error."
)
visual_position_embeddings = visual_position_embeddings / image_text_alignment_mask.unsqueeze(-1)
visual_position_ids = torch.zeros(
*visual_embeds.size()[:-1], dtype=torch.long, device=visual_embeds.device
)
                # When fine-tuning the detector, the image_text_alignment is sometimes padded too long.
if visual_position_embeddings.size(1) != visual_embeds.size(1):
if visual_position_embeddings.size(1) < visual_embeds.size(1):
raise ValueError(
f"Visual position embeddings length: {visual_position_embeddings.size(1)}"
f"should be the same as `visual_embeds` length: {visual_embeds.size(1)}"
)
visual_position_embeddings = visual_position_embeddings[:, : visual_embeds.size(1), :]
visual_position_embeddings = visual_position_embeddings + self.visual_position_embeddings(
visual_position_ids
)
else:
visual_position_ids = torch.zeros(
*visual_embeds.size()[:-1], dtype=torch.long, device=visual_embeds.device
)
visual_position_embeddings = self.visual_position_embeddings(visual_position_ids)
visual_embeddings = visual_embeds + visual_position_embeddings + visual_token_type_embeddings
embeddings = torch.cat((embeddings, visual_embeddings), dim=1)
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
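# Hedged shape walkthrough (not part of the original model file): text tokens
# and projected visual features are embedded separately and concatenated along
# the sequence dimension. The SimpleNamespace below only mimics the handful of
# VisualBertConfig fields this module reads; all sizes are arbitrary.
def _visual_bert_embeddings_demo():
    from types import SimpleNamespace
    cfg = SimpleNamespace(vocab_size=100, hidden_size=32, pad_token_id=0,
                          max_position_embeddings=64, type_vocab_size=2,
                          layer_norm_eps=1e-12, hidden_dropout_prob=0.0,
                          special_visual_initialize=True, visual_embedding_dim=48)
    emb = VisualBertEmbeddings(cfg)
    input_ids = torch.randint(0, 100, (2, 7))
    visual_embeds = torch.randn(2, 5, 48)
    out = emb(input_ids=input_ids, visual_embeds=visual_embeds)
    assert out.shape == (2, 7 + 5, 32)   # 7 text positions followed by 5 visual ones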
class VisualBertSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
output_attentions=False,
):
mixed_query_layer = self.query(hidden_states)
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in VisualBertSelfAttentionModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
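# Hedged shape walkthrough of the multi-head reshape used above; the tiny
# namespace is a stand-in for the relevant VisualBertConfig fields only.
def _visual_bert_self_attention_demo():
    from types import SimpleNamespace
    cfg = SimpleNamespace(hidden_size=64, num_attention_heads=4,
                          attention_probs_dropout_prob=0.0)
    attn = VisualBertSelfAttention(cfg)
    hidden = torch.randn(2, 10, 64)                   # (batch, seq, hidden)
    context, probs = attn(hidden, output_attentions=True)
    # each of the 4 heads attends over hidden_size / num_heads = 16 dims, then merges
    assert context.shape == (2, 10, 64) and probs.shape == (2, 4, 10, 10)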
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->VisualBert
class VisualBertSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class VisualBertAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = VisualBertSelfAttention(config)
self.output = VisualBertSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
output_attentions=False,
):
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->VisualBert
class VisualBertIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->VisualBert
class VisualBertOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class VisualBertLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = VisualBertAttention(config)
self.intermediate = VisualBertIntermediate(config)
self.output = VisualBertOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
output_attentions=False,
):
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class VisualBertEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([VisualBertLayer(config) for _ in range(config.num_hidden_layers)])
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
if getattr(self.config, "gradient_checkpointing", False) and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
)
else:
layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
all_hidden_states,
all_self_attentions,
]
if v is not None
)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions
)
# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->VisualBert
class VisualBertPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->VisualBert
class VisualBertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->VisualBert
class VisualBertLMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = VisualBertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert->VisualBert
class VisualBertPreTrainingHeads(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = VisualBertLMPredictionHead(config)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class VisualBertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = VisualBertConfig
base_model_prefix = "visual_bert"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@dataclass
class VisualBertForPreTrainingOutput(ModelOutput):
"""
Output type of :class:`~transformers.VisualBertForPreTraining`.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided):
Total loss as the sum of the masked language modeling loss and the sentence-image prediction
(classification) loss.
prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
seq_relationship_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
Prediction scores of the sentence-image prediction (classification) head (scores of True/False continuation
before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
prediction_logits: torch.FloatTensor = None
seq_relationship_logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
VISUAL_BERT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.VisualBertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
VISUAL_BERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.BertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
visual_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, visual_seq_length, visual_embedding_dim)`, `optional`):
The embedded representation of the visual inputs, generally derived using an object detector.
visual_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, visual_seq_length)`, `optional`):
Mask to avoid performing attention on visual embeddings. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
visual_token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, visual_seq_length)`, `optional`):
Segment token indices to indicate different portions of the visual embeds.
`What are token type IDs? <../glossary.html#token-type-ids>`_ The authors of VisualBERT set the
`visual_token_type_ids` to `1` for all tokens.
image_text_alignment (:obj:`torch.LongTensor` of shape :obj:`(batch_size, visual_seq_length, alignment_number)`, `optional`):
Image-text alignment used to decide the position IDs of the visual embeddings.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare VisualBert Model transformer outputting raw hidden-states without any specific head on top.",
VISUAL_BERT_START_DOCSTRING,
)
class VisualBertModel(VisualBertPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) following the architecture described in `Attention is
all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
"""
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = VisualBertEmbeddings(config)
self.encoder = VisualBertEncoder(config)
self.pooler = VisualBertPooler(config) if add_pooling_layer else None
self.bypass_transformer = config.bypass_transformer
if self.bypass_transformer:
self.additional_layer = VisualBertLayer(config)
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(VISUAL_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
visual_embeds=None,
visual_attention_mask=None,
visual_token_type_ids=None,
image_text_alignment=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Returns:
Example::
>>> # Assumption: `get_visual_embeddings(image)` gets the visual embeddings of the image.
>>> from transformers import BertTokenizer, VisualBertModel
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>> model = VisualBertModel.from_pretrained('uclanlp/visualbert-vqa-coco-pre')
>>> inputs = tokenizer("The capital of France is Paris.", return_tensors="pt")
>>> visual_embeds = get_visual_embeddings(image).unsqueeze(0)
>>> visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long) #example
>>> visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float)
>>> inputs.update({{
... "visual_embeds": visual_embeds,
... "visual_token_type_ids": visual_token_type_ids,
... "visual_attention_mask": visual_attention_mask
... }})
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
batch_size, seq_length = input_shape
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if visual_embeds is None:
raise ValueError(
f"`visual_embeds` can not be of type {type(visual_embeds)} when using a VisualBert Model."
)
device = input_ids.device if input_ids is not None else inputs_embeds.device
visual_input_shape = visual_embeds.size()[:-1]
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if visual_attention_mask is None:
visual_attention_mask = torch.ones(visual_input_shape, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
combined_attention_mask = torch.cat((attention_mask, visual_attention_mask), dim=-1)
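# The text and visual masks are concatenated so that every token can attend across both
# modalities within a single self-attention pass.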
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
combined_attention_mask, [batch_size, input_shape + visual_input_shape], device
)
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
visual_embeds=visual_embeds,
visual_token_type_ids=visual_token_type_ids,
image_text_alignment=image_text_alignment,
)
if self.bypass_transformer and visual_embeds is not None:
text_length = input_ids.size(1)
text_embedding_output = embedding_output[:, :text_length, :]
visual_embedding_output = embedding_output[:, text_length:, :]
text_extended_attention_mask = extended_attention_mask[:, :, text_length, :text_length]
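# In the bypass path only the text tokens go through the stacked encoder; the visual embeddings
# are concatenated back afterwards and processed by a single additional layer (self.additional_layer).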
encoded_outputs = self.encoder(
text_embedding_output,
attention_mask=text_extended_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoded_outputs[0]
concatenated_input = torch.cat((sequence_output, visual_embedding_output), dim=1)
sequence_output = self.additional_layer(concatenated_input, extended_attention_mask)
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
else:
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
@add_start_docstrings(
"""
VisualBert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a
`sentence-image prediction (classification)` head.
""",
VISUAL_BERT_START_DOCSTRING,
)
class VisualBertForPreTraining(VisualBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.visual_bert = VisualBertModel(config)
self.cls = VisualBertPreTrainingHeads(config)
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(VISUAL_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=VisualBertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
visual_embeds=None,
visual_attention_mask=None,
visual_token_type_ids=None,
image_text_alignment=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
sentence_image_labels=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape ``(batch_size, total_sequence_length)``, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
sentence_image_labels (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):
Labels for computing the sentence-image prediction (classification) loss. Input should be a sequence
pair (see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``:
- 0 indicates sequence B is a matching pair of sequence A for the given image,
- 1 indicates sequence B is a random sequence w.r.t A for the given image.
Returns:
Example::
>>> # Assumption: `get_visual_embeddings(image)` gets the visual embeddings of the image in the batch.
>>> from transformers import BertTokenizer, VisualBertForPreTraining
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>> model = VisualBertForPreTraining.from_pretrained('uclanlp/visualbert-vqa-coco-pre')
>>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="pt")
>>> visual_embeds = get_visual_embeddings(image).unsqueeze(0)
>>> visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long) #example
>>> visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float)
>>> inputs.update({{
... "visual_embeds": visual_embeds,
... "visual_token_type_ids": visual_token_type_ids,
... "visual_attention_mask": visual_attention_mask
... }})
>>> max_length = inputs["input_ids"].shape[-1]+visual_embeds.shape[-2]
>>> labels = tokenizer("The capital of France is Paris.", return_tensors="pt", padding="max_length", max_length=max_length)["input_ids"]
>>> sentence_image_labels = torch.tensor(1).unsqueeze(0) # Batch_size
>>> outputs = model(**inputs, labels=labels, sentence_image_labels=sentence_image_labels)
>>> loss = outputs.loss
>>> prediction_logits = outputs.prediction_logits
>>> seq_relationship_logits = outputs.seq_relationship_logits
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.visual_bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
visual_embeds=visual_embeds,
visual_attention_mask=visual_attention_mask,
visual_token_type_ids=visual_token_type_ids,
image_text_alignment=image_text_alignment,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output, pooled_output = outputs[:2]
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
total_loss = None
if labels is not None and sentence_image_labels is not None:
total_size = attention_mask.size(-1) + visual_attention_mask.size(-1)
if labels.size(-1) != total_size:
raise ValueError(
f"The labels provided should have same sequence length as total attention mask."
f"Found labels with sequence length {labels.size(-1)}, expected {total_size}."
)
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
sentence_image_loss = loss_fct(seq_relationship_score.view(-1, 2), sentence_image_labels.view(-1))
total_loss = masked_lm_loss + sentence_image_loss
if labels is not None and sentence_image_labels is None:
total_size = attention_mask.size(-1) + visual_attention_mask.size(-1)
if labels.size(-1) != total_size:
raise ValueError(
f"The labels provided should have same sequence length as total attention mask."
f"Found labels with sequence length {labels.size(-1)}, expected {total_size}."
)
loss_fct = CrossEntropyLoss()
total_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores, seq_relationship_score) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return VisualBertForPreTrainingOutput(
loss=total_loss,
prediction_logits=prediction_scores,
seq_relationship_logits=seq_relationship_score,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
VisualBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
a softmax) e.g. for VCR tasks.
""",
VISUAL_BERT_START_DOCSTRING,
)
class VisualBertForMultipleChoice(VisualBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.visual_bert = VisualBertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.cls = nn.Linear(config.hidden_size, 1)
self.init_weights()
@add_start_docstrings_to_model_forward(
VISUAL_BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
)
@replace_return_docstrings(output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
visual_embeds=None,
visual_attention_mask=None,
visual_token_type_ids=None,
image_text_alignment=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors.
(See :obj:`input_ids` above)
Returns:
Example::
>>> from transformers import BertTokenizer, VisualBertForMultipleChoice
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>> model = VisualBertForMultipleChoice.from_pretrained('uclanlp/visualbert-vcr')
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> choice0 = "It is eaten with a fork and a knife."
>>> choice1 = "It is eaten while held in the hand."
>>> visual_embeds = get_visual_embeddings(image)
>>> # (batch_size, num_choices, visual_seq_length, visual_embedding_dim)
>>> visual_embeds = visual_embeds.expand(1, 2, *visual_embeds.shape)
>>> visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long)
>>> visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float)
>>> labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1
>>> encoding = tokenizer([[prompt, prompt], [choice0, choice1]], return_tensors='pt', padding=True)
>>> # batch size is 1
>>> inputs_dict = {{k: v.unsqueeze(0) for k,v in encoding.items()}}
>>> inputs_dict.update({{
...     "visual_embeds": visual_embeds,
...     "visual_attention_mask": visual_attention_mask,
...     "visual_token_type_ids": visual_token_type_ids,
...     "labels": labels
... }})
>>> outputs = model(**inputs_dict)
>>> loss = outputs.loss
>>> logits = outputs.logits
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
visual_embeds = (
visual_embeds.view(-1, visual_embeds.size(-2), visual_embeds.size(-1))
if visual_embeds is not None
else None
)
visual_attention_mask = (
visual_attention_mask.view(-1, visual_attention_mask.size(-1))
if visual_attention_mask is not None
else None
)
visual_token_type_ids = (
visual_token_type_ids.view(-1, visual_token_type_ids.size(-1))
if visual_token_type_ids is not None
else None
)
outputs = self.visual_bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
visual_embeds=visual_embeds,
visual_attention_mask=visual_attention_mask,
visual_token_type_ids=visual_token_type_ids,
image_text_alignment=image_text_alignment,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
_, pooled_output = outputs[0], outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.cls(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
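# logits has shape (batch_size * num_choices, 1); viewing it as (batch_size, num_choices)
# lets CrossEntropyLoss score all choices of an example jointly.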
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
VisualBert Model with a classification/regression head on top (a dropout and a linear layer on top of the pooled
output) for VQA.
""",
VISUAL_BERT_START_DOCSTRING,
)
class VisualBertForQuestionAnswering(VisualBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.visual_bert = VisualBertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.cls = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(VISUAL_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
visual_embeds=None,
visual_attention_mask=None,
visual_token_type_ids=None,
image_text_alignment=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, total_sequence_length)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. A KLDivLoss is computed between the labels and the returned logits.
Returns:
Example::
>>> # Assumption: `get_visual_embeddings(image)` gets the visual embeddings of the image in the batch.
>>> from transformers import BertTokenizer, VisualBertForQuestionAnswering
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>> model = VisualBertForQuestionAnswering.from_pretrained('uclanlp/visualbert-vqa')
>>> text = "Who is eating the apple?"
>>> inputs = tokenizer(text, return_tensors='pt')
>>> visual_embeds = get_visual_embeddings(image).unsqueeze(0)
>>> visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long) #example
>>> visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float)
>>> inputs.update({{
... "visual_embeds": visual_embeds,
... "visual_token_type_ids": visual_token_type_ids,
... "visual_attention_mask": visual_attention_mask
... }})
>>> labels = torch.tensor([[0.0,1.0]]).unsqueeze(0) # Batch size 1, Num labels 2
>>> outputs = model(**inputs, labels=labels)
>>> loss = outputs.loss
>>> scores = outputs.logits
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# Get the index of the last text token
index_to_gather = attention_mask.sum(1) - 2 # as in original code
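# attention_mask.sum(1) counts the attended text tokens (including [CLS] and [SEP]); subtracting 2
# points at the last content token before [SEP], whose hidden state is gathered as the pooled representation below.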
outputs = self.visual_bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
visual_embeds=visual_embeds,
visual_attention_mask=visual_attention_mask,
visual_token_type_ids=visual_token_type_ids,
image_text_alignment=image_text_alignment,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
# TO-CHECK: From the original code
index_to_gather = (
index_to_gather.unsqueeze(-1).unsqueeze(-1).expand(index_to_gather.size(0), 1, sequence_output.size(-1))
)
pooled_output = torch.gather(sequence_output, 1, index_to_gather)
pooled_output = self.dropout(pooled_output)
logits = self.cls(pooled_output)
reshaped_logits = logits.view(-1, self.num_labels)
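# pooled_output has shape (batch_size, 1, hidden_size), so logits reshape to (batch_size, num_labels).
# KLDivLoss expects log-probabilities, hence the LogSoftmax applied to the logits before the loss.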
loss = None
if labels is not None:
loss_fct = nn.KLDivLoss(reduction="batchmean")
log_softmax = nn.LogSoftmax(dim=-1)
reshaped_logits = log_softmax(reshaped_logits)
loss = loss_fct(reshaped_logits, labels.contiguous())
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
VisualBert Model with a sequence classification head on top (a dropout and a linear layer on top of the pooled
output) for Visual Reasoning e.g. for NLVR task.
""",
VISUAL_BERT_START_DOCSTRING,
)
class VisualBertForVisualReasoning(VisualBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.visual_bert = VisualBertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.cls = nn.Linear(config.hidden_size, config.num_labels) # 2
self.init_weights()
@add_start_docstrings_to_model_forward(VISUAL_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
visual_embeds=None,
visual_attention_mask=None,
visual_token_type_ids=None,
image_text_alignment=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. A classification loss is computed (Cross-Entropy) against these labels.
Returns:
Example::
>>> # Assumption: `get_visual_embeddings(image)` gets the visual embeddings of the image in the batch.
>>> from transformers import BertTokenizer, VisualBertForVisualReasoning
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>> model = VisualBertForVisualReasoning.from_pretrained('uclanlp/visualbert-nlvr2')
>>> text = "Who is eating the apple?"
>>> inputs = tokenizer(text, return_tensors='pt')
>>> visual_embeds = get_visual_embeddings(image).unsqueeze(0)
>>> visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long) #example
>>> visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float)
>>> inputs.update({{
... "visual_embeds": visual_embeds,
... "visual_token_type_ids": visual_token_type_ids,
... "visual_attention_mask": visual_attention_mask
... }})
>>> labels = torch.tensor(1).unsqueeze(0) # Batch size 1, Num choices 2
>>> outputs = model(**inputs, labels=labels)
>>> loss = outputs.loss
>>> scores = outputs.logits
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.visual_bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
visual_embeds=visual_embeds,
visual_attention_mask=visual_attention_mask,
visual_token_type_ids=visual_token_type_ids,
image_text_alignment=image_text_alignment,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
# sequence_output = outputs[0]
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.cls(pooled_output)
reshaped_logits = logits.contiguous()
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class VisualBertRegionToPhraseAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = 1 # config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, query, key, attention_mask):
attention_mask = attention_mask.to(query.dtype)
attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
attention_mask = (1.0 - attention_mask) * -10000.0
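# Convert the 1/0 keep-mask into an additive mask: 0 for positions to attend to and -10000 for
# masked positions, so they contribute (almost) zero probability once a softmax is applied downstream.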
mixed_query_layer = self.query(query)
mixed_key_layer = self.key(key)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
attention_scores = attention_scores + attention_mask
attention_scores = attention_scores.squeeze(1)
return attention_scores
@add_start_docstrings(
"""
VisualBert Model with a Masked Language Modeling head and an attention layer on top for Region-to-Phrase Alignment
e.g. for Flickr30 Entities task.
""",
VISUAL_BERT_START_DOCSTRING,
)
class VisualBertForRegionToPhraseAlignment(VisualBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.visual_bert = VisualBertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.cls = VisualBertPreTrainingHeads(config)
self.attention = VisualBertRegionToPhraseAttention(config)
self.init_weights()
@add_start_docstrings_to_model_forward(VISUAL_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
visual_embeds=None,
visual_attention_mask=None,
visual_token_type_ids=None,
image_text_alignment=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
region_to_phrase_position=None,
labels=None,
):
r"""
region_to_phrase_position (:obj:`torch.LongTensor` of shape ``(batch_size, total_sequence_length)``, `optional`):
The positions of the image embeddings corresponding to the textual tokens.
labels (:obj:`torch.LongTensor` of shape ``(batch_size, total_sequence_length, visual_sequence_length)``, `optional`):
Labels for computing the masked language modeling loss. KLDivLoss is computed against these labels and
the outputs from the attention layer.
Returns:
Example::
>>> # Assumption: `get_visual_embeddings(image)` gets the visual embeddings of the image in the batch.
>>> from transformers import BertTokenizer, VisualBertForRegionToPhraseAlignment
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>> model = VisualBertForRegionToPhraseAlignment.from_pretrained('uclanlp/visualbert-vqa-coco-pre')
>>> text = "Who is eating the apple?"
>>> inputs = tokenizer(text, return_tensors='pt')
>>> visual_embeds = get_visual_embeddings(image).unsqueeze(0)
>>> visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long) #example
>>> visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float)
>>> region_to_phrase_position = torch.ones((1, inputs["input_ids"].shape[-1]+visual_embeds.shape[-2]))
>>> inputs.update({{
... "region_to_phrase_position": region_to_phrase_position,
... "visual_embeds": visual_embeds,
... "visual_token_type_ids": visual_token_type_ids,
... "visual_attention_mask": visual_attention_mask
... }})
>>> labels = torch.ones((1, inputs["input_ids"].shape[-1]+visual_embeds.shape[-2], visual_embeds.shape[-2])) # Batch size 1
>>> outputs = model(**inputs, labels=labels)
>>> loss = outputs.loss
>>> scores = outputs.logits
"""
if region_to_phrase_position is None:
raise ValueError("`region_to_phrase_position` should not be None when using Flickr Model.")
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.visual_bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
visual_embeds=visual_embeds,
visual_attention_mask=visual_attention_mask,
visual_token_type_ids=visual_token_type_ids,
image_text_alignment=image_text_alignment,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
region_to_phrase_position_mask = (region_to_phrase_position != -1).long()
# Make the -1 become 0
region_to_phrase_position = region_to_phrase_position * region_to_phrase_position_mask
# Selected_positions = batch x selected position x dim
expanded_region_to_phrase_positions = region_to_phrase_position.unsqueeze(2).expand(
region_to_phrase_position.size(0), region_to_phrase_position.size(1), sequence_output.size(2)
)
selected_positions = sequence_output.gather(1, expanded_region_to_phrase_positions)
# Visual Features = batch x visual_feature_length x dim
# This will need separate image and visual masks.
visual_features = sequence_output[:, attention_mask.size(1) :]
if visual_features.size(1) != visual_attention_mask.size(1):
raise ValueError(
f"Visual features length :{visual_features.size(1)} should be the same"
f" as visual attention mask length: {visual_attention_mask.size(1)}."
)
logits = self.attention(selected_positions, visual_features, visual_attention_mask)
loss = None
if labels is not None:
# scores = batch x selected position x visual_feature
# scores = selected_positions.bmm(visual_features.transpose(1,2))
# label = batch x selected_position x needed position
loss_fct = KLDivLoss(reduction="batchmean")
log_softmax = LogSoftmax(dim=-1)
scores = log_softmax(logits)
labels = labels.contiguous()
loss = loss_fct(scores, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
| [
"torch.nn.Linear",
"torch.zeros",
"torch.nn.Dropout",
"torch.nn.LayerNorm",
"torch.cat",
"torch.nn.LogSoftmax",
"torch.nn.Softmax",
"torch.gather",
"torch.arange",
"torch.nn.Tanh",
"torch.nn.CrossEntropyLoss",
"torch.ones",
"torch.nn.KLDivLoss",
"torch.matmul",
"torch.nn.Embedding"
] | 1.0 | diiogofernands/transformers | f5cd27694a0c7d0036954c8350f774a5c1181a57 |
1.7 | import torch
from kge import Config, Dataset
from kge.model.kge_model import RelationalScorer, KgeModel
class ComplExScorer(RelationalScorer):
r"""Implementation of the ComplEx KGE scorer.
Reference: Théo Trouillon, Johannes Welbl, Sebastian Riedel, Éric Gaussier and
Guillaume Bouchard: Complex Embeddings for Simple Link Prediction. ICML 2016.
`<http://proceedings.mlr.press/v48/trouillon16.pdf>`_
"""
def __init__(self, config: Config, dataset: Dataset, configuration_key=None):
super().__init__(config, dataset, configuration_key)
def score_emb(self, s_emb, p_emb, o_emb, combine: str):
n = p_emb.size(0)
# Here we use a fast implementation of computing the ComplEx scores using
# Hadamard products, as in Eq. (11) of paper.
#
# Split the relation and object embeddings into real part (first half) and
# imaginary part (second half).
p_emb_re, p_emb_im = (t.contiguous() for t in p_emb.chunk(2, dim=1))
o_emb_re, o_emb_im = (t.contiguous() for t in o_emb.chunk(2, dim=1))
# combine them again to create a column block for each required combination
s_all = torch.cat((s_emb, s_emb), dim=1) # re, im, re, im
r_all = torch.cat((p_emb_re, p_emb, -p_emb_im), dim=1) # re, re, im, -im
o_all = torch.cat((o_emb, o_emb_im, o_emb_re), dim=1) # re, im, im, re
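# For a single triple, the four column blocks built above compute the real part of the trilinear product
#   Re(<s, p, conj(o)>) = <s_re, p_re, o_re> + <s_im, p_re, o_im> + <s_re, p_im, o_im> - <s_im, p_im, o_re>
# as one Hadamard product followed by the sum / matrix multiplication below.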
if combine == "spo":
out = (s_all * o_all * r_all).sum(dim=1)
elif combine == "sp_":
out = (s_all * r_all).mm(o_all.transpose(0, 1))
elif combine == "_po":
out = (r_all * o_all).mm(s_all.transpose(0, 1))
else:
return super().score_emb(s_emb, p_emb, o_emb, combine)
return out.view(n, -1)
class ComplEx(KgeModel):
r"""Implementation of the ComplEx KGE model."""
def __init__(
self,
config: Config,
dataset: Dataset,
configuration_key=None,
init_for_load_only=False,
):
super().__init__(
config=config,
dataset=dataset,
scorer=ComplExScorer,
configuration_key=configuration_key,
init_for_load_only=init_for_load_only,
)
| [
"torch.cat"
] | 1.7.1 | alexgaskell10/encoded_kge | 2959c058125515a3e0e0b811ffe8086d6699006c |
1.7 | import os
from collections import OrderedDict
import sys
import torch
import csv
import yaml
import socket
import copy
from kge.job import Trace
from kge import Config
## EXPORTED METHODS #####################################################################
def add_dump_parsers(subparsers):
# 'kge dump' can have associated sub-commands which can have different args
parser_dump = subparsers.add_parser("dump", help="Dump objects to stdout")
subparsers_dump = parser_dump.add_subparsers(
title="dump_command", dest="dump_command"
)
subparsers_dump.required = True
_add_dump_trace_parser(subparsers_dump)
_add_dump_checkpoint_parser(subparsers_dump)
_add_dump_config_parser(subparsers_dump)
def dump(args):
"""Execute the 'kge dump' commands. """
if args.dump_command == "trace":
_dump_trace(args)
elif args.dump_command == "checkpoint":
_dump_checkpoint(args)
elif args.dump_command == "config":
_dump_config(args)
else:
raise ValueError()
def get_config_for_job_id(job_id, folder_path):
config = Config(load_default=True)
if job_id:
config_path = os.path.join(
folder_path, "config", job_id.split("-")[0] + ".yaml"
)
else:
config_path = os.path.join(folder_path, "config.yaml")
if os.path.isfile(config_path):
config.load(config_path, create=True)
else:
raise Exception("Could not find config file for {}".format(job_id))
return config
### DUMP CHECKPOINT #####################################################################
def _add_dump_checkpoint_parser(subparsers_dump):
parser_dump_checkpoint = subparsers_dump.add_parser(
"checkpoint", help=("Dump information stored in a checkpoint")
)
parser_dump_checkpoint.add_argument(
"source",
help="A path to either a checkpoint or a job folder (then uses best or, "
"if not present, last checkpoint).",
nargs="?",
default=".",
)
parser_dump_checkpoint.add_argument(
"--keys",
"-k",
type=str,
nargs="*",
help="List of keys to include (separated by space)",
)
def _dump_checkpoint(args):
"""Execute the 'dump checkpoint' command."""
# Determine checkpoint to use
if os.path.isfile(args.source):
checkpoint_file = args.source
else:
checkpoint_file = Config.best_or_last_checkpoint_file(args.source)
# Load the checkpoint and strip some fields
checkpoint = torch.load(checkpoint_file, map_location="cpu")
# Dump it
print(f"# Dump of checkpoint: {checkpoint_file}")
print(f"parameter_names: {list(checkpoint['model'][0].keys())}")
excluded_keys = {"model", "optimizer_state_dict"}
if args.keys is not None:
excluded_keys = {key for key in excluded_keys if key not in args.keys}
excluded_keys = excluded_keys.union(
{key for key in checkpoint if key not in args.keys}
)
excluded_keys = {key for key in excluded_keys if key in checkpoint}
for key in excluded_keys:
del checkpoint[key]
if excluded_keys:
print(f"# Excluded keys: {excluded_keys}")
yaml.dump(checkpoint, sys.stdout)
### DUMP TRACE ##########################################################################
def _add_dump_trace_parser(subparsers_dump):
parser_dump_trace = subparsers_dump.add_parser(
"trace",
help=(
"Dump the trace of a job to stdout as CSV (default) or YAML. The tracefile"
" is processed backwards starting from the last entry. Further options"
" allow to start processing from a particular checkpoint, job_id, or"
" epoch number."
),
)
parser_dump_trace.add_argument(
"source",
help="A path to either a checkpoint or a job folder.",
nargs="?",
default=".",
)
parser_dump_trace.add_argument(
"--train",
action="store_const",
const=True,
default=False,
help=(
"Include entries from training jobs (enabled when none of --train, --valid,"
" or --test is specified)."
),
)
parser_dump_trace.add_argument(
"--valid",
action="store_const",
const=True,
default=False,
help=(
"Include entries from validation or evaluation jobs on the valid split"
" (enabled when none of --train, --valid, or --test is specified)."
),
)
parser_dump_trace.add_argument(
"--test",
action="store_const",
const=True,
default=False,
help=(
"Include entries from evaluation on the test data split (enabled when "
" none of --train, --valid, or --test is specified)."
),
)
parser_dump_trace.add_argument(
"--search",
action="store_const",
const=True,
default=False,
help=(
"Dump the tracefile of a search job. The best result of every "
" search trial is dumped. The options --train, --valid, --test,"
" --truncate, --job_id, --checkpoint, --batch, and --example are not"
" applicable."
),
)
parser_dump_trace.add_argument(
"--keysfile",
default=False,
help=(
"A path to a file which contains lines in the format"
" 'new_key_name'='key_name'. For every line in the keys file, the command"
" searches the value of 'key_name' in the trace entries (first) and"
" config (second) and adds a respective column in the CSV file with name"
" 'new_key_name'. Additionally, for 'key_name' the special keys '$folder',"
" '$machine' '$checkpoint' and '$base_model' can be used."
),
)
parser_dump_trace.add_argument(
"--keys",
"-k",
nargs="*",
type=str,
help=(
"A list of 'key' entries (separated by space). Each 'key' has form"
" 'new_key_name=key_name' or 'key_name'. This adds a column as in the"
" --keysfile option. When only 'key_name' is provided, it is also used as"
" the column name in the CSV file."
),
)
parser_dump_trace.add_argument(
"--checkpoint",
default=False,
action="store_const",
const=True,
help=(
"If source is a path to a job folder and --checkpoint is set, the best"
" (if present) or last checkpoint is used to determine the job_id from"
" where the tracefile is processed backwards."
),
)
parser_dump_trace.add_argument(
"--job_id",
default=False,
help=(
"Specifies the training job_id in the tracefile from where to start"
" processing backwards when no checkpoint is specified. If not provided,"
" the job_id of the last training job entry in the tracefile is used."
),
)
parser_dump_trace.add_argument(
"--truncate",
action="store",
default=False,
const=True,
nargs="?",
help=(
"Takes an integer argument which defines the maximum epoch number from"
" where the tracefile is processed backwards. If not provided, all epochs"
" are included (the epoch number can still be bounded by a specified"
" job_id or checkpoint). When a checkpoint is specified, (by providing one"
" explicitly as source or by using --checkpoint), --truncate can"
" additionally be enabled without an argument which sets the maximum epoch"
" number to the epoch provided by the checkpoint."
),
)
parser_dump_trace.add_argument(
"--yaml",
action="store_const",
const=True,
default=False,
help="Dump YAML instead of CSV.",
)
parser_dump_trace.add_argument(
"--batch",
action="store_const",
const=True,
default=False,
help="Include entries on batch level.",
)
parser_dump_trace.add_argument(
"--example",
action="store_const",
const=True,
default=False,
help="Include entries on example level.",
)
parser_dump_trace.add_argument(
"--no-header",
action="store_const",
const=True,
default=False,
help="Exclude column names (header) from the CSV file.",
)
parser_dump_trace.add_argument(
"--no-default-keys",
"-K",
action="store_const",
const=True,
default=False,
help="Exclude default keys from the CSV file.",
)
parser_dump_trace.add_argument(
"--list-keys",
action="store",
const=True,
default=False,
nargs="?",
help="Output the CSV default keys and all usable keys for --keysfile and --keys"
" for the given configuration of options. Takes an optional string argument"
" which separates the listed keys (default comma), e.g. use $'\\n' to display"
" every key on a new line.",
)
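# Illustrative use (assuming a trained job folder as the current directory):
#   kge dump trace . --keys metric_name=valid.metric --no-header > trace.csv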
def _dump_trace(args):
"""Execute the 'dump trace' command."""
if (
args.train
or args.valid
or args.test
or args.truncate
or args.job_id
or args.checkpoint
or args.batch
or args.example
) and args.search:
sys.exit(
"--search and any of --train, --valid, --test, --truncate, --job_id,"
" --checkpoint, --batch, --example are mutually exclusive"
)
entry_type_specified = True
if not (args.train or args.valid or args.test or args.search):
entry_type_specified = False
args.train = True
args.valid = True
args.test = True
truncate_flag = False
truncate_epoch = None
if isinstance(args.truncate, bool) and args.truncate:
truncate_flag = True
elif not isinstance(args.truncate, bool):
if not args.truncate.isdigit():
sys.exit("Integer argument or no argument for --truncate must be used")
truncate_epoch = int(args.truncate)
checkpoint_path = None
if ".pt" in os.path.split(args.source)[-1]:
checkpoint_path = args.source
folder_path = os.path.split(args.source)[0]
else:
# determine job_id and epoch from last/best checkpoint automatically
if args.checkpoint:
checkpoint_path = Config.best_or_last_checkpoint_file(args.source)
folder_path = args.source
if not checkpoint_path and truncate_flag:
sys.exit(
"--truncate can only be used as a flag when a checkpoint is specified."
" Consider specifying a checkpoint or use an integer argument for the"
" --truncate option"
)
if checkpoint_path and args.job_id:
sys.exit(
"--job_id cannot be used together with a checkpoint as the checkpoint"
" already specifies the job_id"
)
trace = os.path.join(folder_path, "trace.yaml")
if not os.path.isfile(trace):
sys.exit(f"No file 'trace.yaml' found at {os.path.abspath(folder_path)}")
# process additional keys from --keys and --keysfile
keymap = OrderedDict()
additional_keys = []
if args.keysfile:
with open(args.keysfile, "r") as keyfile:
additional_keys = keyfile.readlines()
if args.keys:
additional_keys += args.keys
for line in additional_keys:
line = line.rstrip("\n").replace(" ", "")
name_key = line.split("=")
if len(name_key) == 1:
name_key += name_key
keymap[name_key[0]] = name_key[1]
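# e.g. the keysfile line "metric_name=valid.metric" (or the same string passed via --keys) adds a CSV
# column "metric_name" filled from the trace entry or config key "valid.metric"; special keys such as
# $machine and $folder are resolved further below.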
job_id = None
# use job_id and truncate_epoch from checkpoint
if checkpoint_path and truncate_flag:
checkpoint = torch.load(f=checkpoint_path, map_location="cpu")
job_id = checkpoint["job_id"]
truncate_epoch = checkpoint["epoch"]
# only use job_id from checkpoint
elif checkpoint_path:
checkpoint = torch.load(f=checkpoint_path, map_location="cpu")
job_id = checkpoint["job_id"]
# no checkpoint specified job_id might have been set manually
elif args.job_id:
job_id = args.job_id
# don't restrict epoch number in case it has not been specified yet
if not truncate_epoch:
truncate_epoch = float("inf")
entries, job_epochs = [], {}
if not args.search:
entries, job_epochs = Trace.grep_training_trace_entries(
tracefile=trace,
train=args.train,
test=args.test,
valid=args.valid,
example=args.example,
batch=args.batch,
job_id=job_id,
epoch_of_last=truncate_epoch,
)
if not entries and (args.search or not entry_type_specified):
entries = Trace.grep_entries(tracefile=trace, conjunctions=[f"scope: train"])
truncate_epoch = None
if entries:
args.search = True
if not entries and entry_type_specified:
sys.exit(
"No relevant trace entries found. If this was a trace from a search"
" job, dont use any of --train --valid --test."
)
elif not entries:
sys.exit("No relevant trace entries found.")
if args.list_keys:
all_trace_keys = set()
if not args.yaml:
csv_writer = csv.writer(sys.stdout)
# dict[new_name] = (lookup_name, where)
# if where=="config"/"trace" it will be looked up automatically
# if where=="sep" it must be added in in the write loop separately
if args.no_default_keys:
default_attributes = OrderedDict()
else:
default_attributes = OrderedDict(
[
("job_id", ("job_id", "sep")),
("dataset", ("dataset.name", "config")),
("model", ("model", "sep")),
("reciprocal", ("reciprocal", "sep")),
("job", ("job", "sep")),
("job_type", ("type", "trace")),
("split", ("split", "sep")),
("epoch", ("epoch", "trace")),
("avg_loss", ("avg_loss", "trace")),
("avg_penalty", ("avg_penalty", "trace")),
("avg_cost", ("avg_cost", "trace")),
("metric_name", ("valid.metric", "config")),
("metric", ("metric", "sep")),
]
)
if args.search:
default_attributes["child_folder"] = ("folder", "trace")
default_attributes["child_job_id"] = ("child_job_id", "sep")
if not (args.no_header or args.list_keys):
csv_writer.writerow(
list(default_attributes.keys()) + [key for key in keymap.keys()]
)
# store configs for job_id's s.t. they need to be loaded only once
configs = {}
warning_shown = False
for entry in entries:
current_epoch = entry.get("epoch")
job_type = entry.get("job")
job_id = entry.get("job_id")
if truncate_epoch and not current_epoch <= float(truncate_epoch):
continue
# filter out entries not relevant to the unique training sequence determined
# by the options; not relevant for search
if job_type == "train":
if current_epoch > job_epochs[job_id]:
continue
elif job_type == "eval":
if "resumed_from_job_id" in entry:
if current_epoch > job_epochs[entry.get("resumed_from_job_id")]:
continue
elif "parent_job_id" in entry:
if current_epoch > job_epochs[entry.get("parent_job_id")]:
continue
# find relevant config file
child_job_id = entry.get("child_job_id") if "child_job_id" in entry else None
config_key = (
entry.get("folder") + "/" + str(child_job_id) if args.search else job_id
)
if config_key in configs.keys():
config = configs[config_key]
else:
if args.search:
if not child_job_id and not warning_shown:
# This warning is from Dec 19, 2019. TODO remove
print(
"Warning: You are dumping the trace of an older search job. "
"This is fine only if "
"the config.yaml files in each subfolder have not been modified "
"after running the corresponding training job.",
file=sys.stderr,
)
warning_shown = True
config = get_config_for_job_id(
child_job_id, os.path.join(folder_path, entry.get("folder"))
)
entry["type"] = config.get("train.type")
else:
config = get_config_for_job_id(job_id, folder_path)
configs[config_key] = config
if args.list_keys:
all_trace_keys.update(entry.keys())
continue
new_attributes = OrderedDict()
# when training was reciprocal, use the base_model as model
if config.get_default("model") == "reciprocal_relations_model":
model = config.get_default("reciprocal_relations_model.base_model.type")
# the string that substitutes $base_model in keymap if it exists
subs_model = "reciprocal_relations_model.base_model"
reciprocal = 1
else:
model = config.get_default("model")
subs_model = model
reciprocal = 0
# search for the additional keys from --keys and --keysfile
for new_key in keymap.keys():
lookup = keymap[new_key]
# search for special keys
value = None
if lookup == "$folder":
value = os.path.abspath(folder_path)
elif lookup == "$checkpoint" and checkpoint_path:
value = os.path.abspath(checkpoint_path)
elif lookup == "$machine":
value = socket.gethostname()
if "$base_model" in lookup:
lookup = lookup.replace("$base_model", subs_model)
# search for ordinary keys; start searching in trace entry then config
if not value:
value = entry.get(lookup)
if not value:
try:
value = config.get_default(lookup)
except:
pass # value stays None; creates empty field in csv
if value and isinstance(value, bool):
value = 1
elif not value and isinstance(value, bool):
value = 0
new_attributes[new_key] = value
if not args.yaml:
# find the actual values for the default attributes
actual_default = default_attributes.copy()
for new_key in default_attributes.keys():
lookup, where = default_attributes[new_key]
if where == "config":
actual_default[new_key] = config.get(lookup)
elif where == "trace":
actual_default[new_key] = entry.get(lookup)
# keys with separate treatment
# "split" in {train,test,valid} for the datatype
# "job" in {train,eval,valid,search}
if job_type == "train":
if "split" in entry:
actual_default["split"] = entry.get("split")
else:
actual_default["split"] = "train"
actual_default["job"] = "train"
elif job_type == "eval":
if "split" in entry:
actual_default["split"] = entry.get("split") # test or valid
else:
# deprecated
actual_default["split"] = entry.get("data") # test or valid
if entry.get("resumed_from_job_id"):
actual_default["job"] = "eval" # from "kge eval"
else:
actual_default["job"] = "valid" # child of training job
else:
actual_default["job"] = job_type
if "split" in entry:
actual_default["split"] = entry.get("split")
else:
# deprecated
actual_default["split"] = entry.get("data") # test or valid
actual_default["job_id"] = job_id.split("-")[0]
actual_default["model"] = model
actual_default["reciprocal"] = reciprocal
            # the lookup name comes from the config; the value comes from the trace entry
actual_default["metric"] = entry.get(config.get_default("valid.metric"))
if args.search:
actual_default["child_job_id"] = entry.get("child_job_id").split("-")[0]
for key in list(actual_default.keys()):
if key not in default_attributes:
del actual_default[key]
csv_writer.writerow(
[actual_default[new_key] for new_key in actual_default.keys()]
+ [new_attributes[new_key] for new_key in new_attributes.keys()]
)
else:
entry.update({"reciprocal": reciprocal, "model": model})
if keymap:
entry.update(new_attributes)
print(entry)
if args.list_keys:
# only one config needed
config = configs[list(configs.keys())[0]]
options = Config.flatten(config.options)
options = sorted(
filter(lambda opt: "+++" not in opt, options), key=lambda opt: opt.lower()
)
if isinstance(args.list_keys, bool):
sep = ", "
else:
sep = args.list_keys
print("Default keys for CSV: ")
print(*default_attributes.keys(), sep=sep)
print("")
print("Special keys: ")
print(*["$folder", "$checkpoint", "$machine", "$base_model"], sep=sep)
print("")
print("Keys found in trace: ")
print(*sorted(all_trace_keys), sep=sep)
print("")
print("Keys found in config: ")
print(*options, sep=sep)
### DUMP CONFIG ########################################################################
def _add_dump_config_parser(subparsers_dump):
parser_dump_config = subparsers_dump.add_parser(
"config", help=("Dump a configuration")
)
parser_dump_config.add_argument(
"source",
help="A path to either a checkpoint, a config file, or a job folder.",
nargs="?",
default=".",
)
parser_dump_config.add_argument(
"--minimal",
"-m",
default=False,
action="store_const",
const=True,
help="Only dump configuration options different from the default configuration (default)",
)
parser_dump_config.add_argument(
"--raw",
"-r",
default=False,
action="store_const",
const=True,
help="Dump the config as is",
)
parser_dump_config.add_argument(
"--full",
"-f",
default=False,
action="store_const",
const=True,
help="Add all values from the default configuration before dumping the config",
)
parser_dump_config.add_argument(
"--include",
"-i",
type=str,
nargs="*",
help="List of keys to include (separated by space). "
"All subkeys are also included. Cannot be used with --raw.",
)
parser_dump_config.add_argument(
"--exclude",
"-e",
type=str,
nargs="*",
help="List of keys to exclude (separated by space). "
"All subkeys are also exluded. Applied after --include. "
"Cannot be used with --raw.",
)
def _dump_config(args):
"""Execute the 'dump config' command."""
if not (args.raw or args.full or args.minimal):
args.minimal = True
if args.raw + args.full + args.minimal != 1:
raise ValueError("Exactly one of --raw, --full, or --minimal must be set")
if args.raw and (args.include or args.exclude):
raise ValueError(
"--include and --exclude cannot be used with --raw "
"(use --full or --minimal instead)."
)
config = Config()
config_file = None
if os.path.isdir(args.source):
config_file = os.path.join(args.source, "config.yaml")
config.load(config_file)
elif ".yaml" in os.path.split(args.source)[-1]:
config_file = args.source
config.load(config_file)
else: # a checkpoint
checkpoint = torch.load(args.source, map_location="cpu")
if args.raw:
config = checkpoint["config"]
else:
config.load_config(checkpoint["config"])
def print_options(options):
# drop all arguments that are not included
if args.include:
args.include = set(args.include)
options_copy = copy.deepcopy(options)
for key in options_copy.keys():
prefix = key
keep = False
while True:
if prefix in args.include:
keep = True
break
else:
last_dot_index = prefix.rfind(".")
if last_dot_index < 0:
break
else:
prefix = prefix[:last_dot_index]
if not keep:
del options[key]
# remove all arguments that are excluded
if args.exclude:
args.exclude = set(args.exclude)
options_copy = copy.deepcopy(options)
for key in options_copy.keys():
prefix = key
while True:
if prefix in args.exclude:
del options[key]
break
else:
last_dot_index = prefix.rfind(".")
if last_dot_index < 0:
break
else:
prefix = prefix[:last_dot_index]
# convert the remaining options to a Config and print it
config = Config(load_default=False)
config.set_all(options, create=True)
print(yaml.dump(config.options))
if args.raw:
if config_file:
with open(config_file, "r") as f:
print(f.read())
else:
print_options(config.options)
elif args.full:
print_options(config.options)
else: # minimal
default_config = Config()
imports = config.get("import")
if imports is not None:
if not isinstance(imports, list):
imports = [imports]
for module_name in imports:
default_config._import(module_name)
default_options = Config.flatten(default_config.options)
new_options = Config.flatten(config.options)
minimal_options = {}
for option, value in new_options.items():
if option not in default_options or default_options[option] != value:
minimal_options[option] = value
# always retain all imports
if imports is not None:
minimal_options["import"] = list(set(imports))
print_options(minimal_options)
| [
"torch.load"
] | 1.7.1 | alexgaskell10/encoded_kge | 2959c058125515a3e0e0b811ffe8086d6699006c |
1.3 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence, pack_sequence, PackedSequence
from stanza.models.common.biaffine import BiaffineScorer
from stanza.models.common.hlstm import HighwayLSTM
from stanza.models.common.dropout import WordDropout
from stanza.models.common.vocab import CompositeVocab
from stanza.models.common.char_model import CharacterModel
class Tagger(nn.Module):
def __init__(self, args, vocab, emb_matrix=None, share_hid=False):
super().__init__()
self.vocab = vocab
self.args = args
self.share_hid = share_hid
self.unsaved_modules = []
def add_unsaved_module(name, module):
self.unsaved_modules += [name]
setattr(self, name, module)
# input layers
input_size = 0
if self.args['word_emb_dim'] > 0:
# frequent word embeddings
self.word_emb = nn.Embedding(len(vocab['word']), self.args['word_emb_dim'], padding_idx=0)
input_size += self.args['word_emb_dim']
if not share_hid:
# upos embeddings
self.upos_emb = nn.Embedding(len(vocab['upos']), self.args['tag_emb_dim'], padding_idx=0)
if self.args['char'] and self.args['char_emb_dim'] > 0:
self.charmodel = CharacterModel(args, vocab)
self.trans_char = nn.Linear(self.args['char_hidden_dim'], self.args['transformed_dim'], bias=False)
input_size += self.args['transformed_dim']
if self.args['pretrain']:
# pretrained embeddings, by default this won't be saved into model file
add_unsaved_module('pretrained_emb', nn.Embedding.from_pretrained(torch.from_numpy(emb_matrix), freeze=True))
self.trans_pretrained = nn.Linear(emb_matrix.shape[1], self.args['transformed_dim'], bias=False)
input_size += self.args['transformed_dim']
# recurrent layers
self.taggerlstm = HighwayLSTM(input_size, self.args['hidden_dim'], self.args['num_layers'], batch_first=True, bidirectional=True, dropout=self.args['dropout'], rec_dropout=self.args['rec_dropout'], highway_func=torch.tanh)
self.drop_replacement = nn.Parameter(torch.randn(input_size) / np.sqrt(input_size))
self.taggerlstm_h_init = nn.Parameter(torch.zeros(2 * self.args['num_layers'], 1, self.args['hidden_dim']))
self.taggerlstm_c_init = nn.Parameter(torch.zeros(2 * self.args['num_layers'], 1, self.args['hidden_dim']))
# classifiers
self.upos_hid = nn.Linear(self.args['hidden_dim'] * 2, self.args['deep_biaff_hidden_dim'])
self.upos_clf = nn.Linear(self.args['deep_biaff_hidden_dim'], len(vocab['upos']))
self.upos_clf.weight.data.zero_()
self.upos_clf.bias.data.zero_()
if share_hid:
clf_constructor = lambda insize, outsize: nn.Linear(insize, outsize)
else:
self.xpos_hid = nn.Linear(self.args['hidden_dim'] * 2, self.args['deep_biaff_hidden_dim'] if not isinstance(vocab['xpos'], CompositeVocab) else self.args['composite_deep_biaff_hidden_dim'])
self.ufeats_hid = nn.Linear(self.args['hidden_dim'] * 2, self.args['composite_deep_biaff_hidden_dim'])
clf_constructor = lambda insize, outsize: BiaffineScorer(insize, self.args['tag_emb_dim'], outsize)
if isinstance(vocab['xpos'], CompositeVocab):
self.xpos_clf = nn.ModuleList()
for l in vocab['xpos'].lens():
self.xpos_clf.append(clf_constructor(self.args['composite_deep_biaff_hidden_dim'], l))
else:
self.xpos_clf = clf_constructor(self.args['deep_biaff_hidden_dim'], len(vocab['xpos']))
if share_hid:
self.xpos_clf.weight.data.zero_()
self.xpos_clf.bias.data.zero_()
self.ufeats_clf = nn.ModuleList()
for l in vocab['feats'].lens():
if share_hid:
self.ufeats_clf.append(clf_constructor(self.args['deep_biaff_hidden_dim'], l))
self.ufeats_clf[-1].weight.data.zero_()
self.ufeats_clf[-1].bias.data.zero_()
else:
self.ufeats_clf.append(clf_constructor(self.args['composite_deep_biaff_hidden_dim'], l))
# criterion
self.crit = nn.CrossEntropyLoss(ignore_index=0) # ignore padding
self.drop = nn.Dropout(args['dropout'])
self.worddrop = WordDropout(args['word_dropout'])
def forward(self, word, word_mask, wordchars, wordchars_mask, upos, xpos, ufeats, pretrained, word_orig_idx, sentlens, wordlens):
def pack(x):
return pack_padded_sequence(x, sentlens, batch_first=True)
inputs = []
if self.args['word_emb_dim'] > 0:
word_emb = self.word_emb(word)
word_emb = pack(word_emb)
inputs += [word_emb]
if self.args['pretrain']:
pretrained_emb = self.pretrained_emb(pretrained)
pretrained_emb = self.trans_pretrained(pretrained_emb)
pretrained_emb = pack(pretrained_emb)
inputs += [pretrained_emb]
def pad(x):
return pad_packed_sequence(PackedSequence(x, word_emb.batch_sizes), batch_first=True)[0]
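        # `pack` turns a padded (batch, time, dim) tensor into a PackedSequence using the
        # true sentence lengths; `pad` is its inverse and reuses the batch sizes of the
        # packed word embeddings.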
if self.args['char'] and self.args['char_emb_dim'] > 0:
char_reps = self.charmodel(wordchars, wordchars_mask, word_orig_idx, sentlens, wordlens)
char_reps = PackedSequence(self.trans_char(self.drop(char_reps.data)), char_reps.batch_sizes)
inputs += [char_reps]
lstm_inputs = torch.cat([x.data for x in inputs], 1)
lstm_inputs = self.worddrop(lstm_inputs, self.drop_replacement)
lstm_inputs = self.drop(lstm_inputs)
lstm_inputs = PackedSequence(lstm_inputs, inputs[0].batch_sizes)
lstm_outputs, _ = self.taggerlstm(lstm_inputs, sentlens, hx=(self.taggerlstm_h_init.expand(2 * self.args['num_layers'], word.size(0), self.args['hidden_dim']).contiguous(), self.taggerlstm_c_init.expand(2 * self.args['num_layers'], word.size(0), self.args['hidden_dim']).contiguous()))
lstm_outputs = lstm_outputs.data
upos_hid = F.relu(self.upos_hid(self.drop(lstm_outputs)))
upos_pred = self.upos_clf(self.drop(upos_hid))
preds = [pad(upos_pred).max(2)[1]]
upos = pack(upos).data
loss = self.crit(upos_pred.view(-1, upos_pred.size(-1)), upos.view(-1))
if self.share_hid:
xpos_hid = upos_hid
ufeats_hid = upos_hid
clffunc = lambda clf, hid: clf(self.drop(hid))
else:
xpos_hid = F.relu(self.xpos_hid(self.drop(lstm_outputs)))
ufeats_hid = F.relu(self.ufeats_hid(self.drop(lstm_outputs)))
if self.training:
upos_emb = self.upos_emb(upos)
else:
upos_emb = self.upos_emb(upos_pred.max(1)[1])
clffunc = lambda clf, hid: clf(self.drop(hid), self.drop(upos_emb))
xpos = pack(xpos).data
if isinstance(self.vocab['xpos'], CompositeVocab):
xpos_preds = []
for i in range(len(self.vocab['xpos'])):
xpos_pred = clffunc(self.xpos_clf[i], xpos_hid)
loss += self.crit(xpos_pred.view(-1, xpos_pred.size(-1)), xpos[:, i].view(-1))
xpos_preds.append(pad(xpos_pred).max(2, keepdim=True)[1])
preds.append(torch.cat(xpos_preds, 2))
else:
xpos_pred = clffunc(self.xpos_clf, xpos_hid)
loss += self.crit(xpos_pred.view(-1, xpos_pred.size(-1)), xpos.view(-1))
preds.append(pad(xpos_pred).max(2)[1])
ufeats_preds = []
ufeats = pack(ufeats).data
for i in range(len(self.vocab['feats'])):
ufeats_pred = clffunc(self.ufeats_clf[i], ufeats_hid)
loss += self.crit(ufeats_pred.view(-1, ufeats_pred.size(-1)), ufeats[:, i].view(-1))
ufeats_preds.append(pad(ufeats_pred).max(2, keepdim=True)[1])
preds.append(torch.cat(ufeats_preds, 2))
return loss, preds
| [
"torch.nn.Linear",
"torch.nn.utils.rnn.PackedSequence",
"torch.nn.Dropout",
"torch.cat",
"torch.zeros",
"torch.nn.ModuleList",
"torch.from_numpy",
"torch.randn",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.nn.CrossEntropyLoss"
] | 1.3.0 | danielhers/stanza | d747a7b781da203c286ec51e3842fecb8b0abb15 |
0.4 |
### execute this function to train and test the vae-model
from vaemodel import Model
import numpy as np
import pickle
import torch
import os
import argparse
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
parser = argparse.ArgumentParser()
parser.add_argument('--dataset')
parser.add_argument('--num_shots',type=int)
parser.add_argument('--generalized', type = str2bool)
args = parser.parse_args()
########################################
# the basic hyperparameters
########################################
hyperparameters = {
'num_shots': 0,
'device': 'cuda',
'model_specifics': {'cross_reconstruction': True,
'name': 'CADA',
'distance': 'wasserstein',
'warmup': {'beta': {'factor': 0.25,
'end_epoch': 93,
'start_epoch': 0},
'cross_reconstruction': {'factor': 2.37,
'end_epoch': 75,
'start_epoch': 21},
'distance': {'factor': 8.13,
'end_epoch': 22,
'start_epoch': 6}}},
'lr_gen_model': 0.00015,
'generalized': True,
'batch_size': 50,
'xyu_samples_per_class': {'SUN': (200, 0, 400, 0),
'APY': (200, 0, 400, 0),
'CUB': (200, 0, 400, 0),
'AWA2': (200, 0, 400, 0),
'FLO': (200, 0, 400, 0),
'plant': (200, 0, 400, 0),
'AWA1': (200, 0, 400, 0)},
'epochs': 100,
'loss': 'l1',
'auxiliary_data_source' : 'attributes',
'lr_cls': 0.001,
'dataset': 'CUB',
'hidden_size_rule': {'resnet_features': (1560, 1660),
'attributes': (1450, 665),
'sentences': (1450, 665) },
'latent_size': 64
}
# The training epochs for the final classifier, for early stopping,
# as determined on the validation split
cls_train_steps = [
{'dataset': 'SUN', 'num_shots': 0, 'generalized': True, 'cls_train_steps': 21},
{'dataset': 'SUN', 'num_shots': 0, 'generalized': False, 'cls_train_steps': 30},
{'dataset': 'SUN', 'num_shots': 1, 'generalized': True, 'cls_train_steps': 22},
{'dataset': 'SUN', 'num_shots': 1, 'generalized': False, 'cls_train_steps': 96},
{'dataset': 'SUN', 'num_shots': 5, 'generalized': True, 'cls_train_steps': 29},
{'dataset': 'SUN', 'num_shots': 5, 'generalized': False, 'cls_train_steps': 78},
{'dataset': 'SUN', 'num_shots': 2, 'generalized': True, 'cls_train_steps': 29},
{'dataset': 'SUN', 'num_shots': 2, 'generalized': False, 'cls_train_steps': 61},
{'dataset': 'SUN', 'num_shots': 10, 'generalized': True, 'cls_train_steps': 79},
{'dataset': 'SUN', 'num_shots': 10, 'generalized': False, 'cls_train_steps': 94},
{'dataset': 'AWA1', 'num_shots': 0, 'generalized': True, 'cls_train_steps': 33},
{'dataset': 'AWA1', 'num_shots': 0, 'generalized': False, 'cls_train_steps': 25},
{'dataset': 'AWA1', 'num_shots': 1, 'generalized': True, 'cls_train_steps': 40},
{'dataset': 'AWA1', 'num_shots': 1, 'generalized': False, 'cls_train_steps': 81},
{'dataset': 'AWA1', 'num_shots': 5, 'generalized': True, 'cls_train_steps': 89},
{'dataset': 'AWA1', 'num_shots': 5, 'generalized': False, 'cls_train_steps': 62},
{'dataset': 'AWA1', 'num_shots': 2, 'generalized': True, 'cls_train_steps': 56},
{'dataset': 'AWA1', 'num_shots': 2, 'generalized': False, 'cls_train_steps': 59},
{'dataset': 'AWA1', 'num_shots': 10, 'generalized': True, 'cls_train_steps': 100},
{'dataset': 'AWA1', 'num_shots': 10, 'generalized': False, 'cls_train_steps': 50},
{'dataset': 'CUB', 'num_shots': 0, 'generalized': True, 'cls_train_steps': 23},
{'dataset': 'CUB', 'num_shots': 0, 'generalized': False, 'cls_train_steps': 22},
{'dataset': 'CUB', 'num_shots': 1, 'generalized': True, 'cls_train_steps': 34},
{'dataset': 'CUB', 'num_shots': 1, 'generalized': False, 'cls_train_steps': 46},
{'dataset': 'CUB', 'num_shots': 5, 'generalized': True, 'cls_train_steps': 64},
{'dataset': 'CUB', 'num_shots': 5, 'generalized': False, 'cls_train_steps': 73},
{'dataset': 'CUB', 'num_shots': 2, 'generalized': True, 'cls_train_steps': 39},
{'dataset': 'CUB', 'num_shots': 2, 'generalized': False, 'cls_train_steps': 31},
{'dataset': 'CUB', 'num_shots': 10, 'generalized': True, 'cls_train_steps': 85},
{'dataset': 'CUB', 'num_shots': 10, 'generalized': False, 'cls_train_steps': 67},
{'dataset': 'AWA2', 'num_shots': 0, 'generalized': True, 'cls_train_steps': 29},
{'dataset': 'AWA2', 'num_shots': 0, 'generalized': False, 'cls_train_steps': 39},
{'dataset': 'AWA2', 'num_shots': 1, 'generalized': True, 'cls_train_steps': 44},
{'dataset': 'AWA2', 'num_shots': 1, 'generalized': False, 'cls_train_steps': 96},
{'dataset': 'AWA2', 'num_shots': 5, 'generalized': True, 'cls_train_steps': 99},
{'dataset': 'AWA2', 'num_shots': 5, 'generalized': False, 'cls_train_steps': 100},
{'dataset': 'AWA2', 'num_shots': 2, 'generalized': True, 'cls_train_steps': 69},
{'dataset': 'AWA2', 'num_shots': 2, 'generalized': False, 'cls_train_steps': 79},
{'dataset': 'AWA2', 'num_shots': 10, 'generalized': True, 'cls_train_steps': 86},
{'dataset': 'AWA2', 'num_shots': 10, 'generalized': False, 'cls_train_steps': 78},
{'dataset': 'plant', 'num_shots': 0, 'generalized': True, 'cls_train_steps': 86}
]
##################################
# change some hyperparameters here
##################################
hyperparameters['dataset'] = args.dataset
hyperparameters['num_shots']= args.num_shots
hyperparameters['generalized']= args.generalized
hyperparameters['cls_train_steps'] = [x['cls_train_steps'] for x in cls_train_steps
if all([hyperparameters['dataset']==x['dataset'],
hyperparameters['num_shots']==x['num_shots'],
hyperparameters['generalized']==x['generalized'] ])][0]
print('***')
print(hyperparameters['cls_train_steps'] )
if hyperparameters['generalized']:
if hyperparameters['num_shots']==0:
hyperparameters['samples_per_class'] = {'CUB': (200, 0, 400, 0), 'SUN': (200, 0, 400, 0),
'APY': (200, 0, 400, 0), 'AWA1': (200, 0, 400, 0),
'AWA2': (200, 0, 400, 0), 'FLO': (200, 0, 400, 0), 'plant': (200, 0, 400, 0)}
else:
hyperparameters['samples_per_class'] = {'CUB': (200, 0, 200, 200), 'SUN': (200, 0, 200, 200),
'APY': (200, 0, 200, 200), 'AWA1': (200, 0, 200, 200),
'AWA2': (200, 0, 200, 200), 'FLO': (200, 0, 200, 200)}
else:
if hyperparameters['num_shots']==0:
hyperparameters['samples_per_class'] = {'CUB': (0, 0, 200, 0), 'SUN': (0, 0, 200, 0),
'APY': (0, 0, 200, 0), 'AWA1': (0, 0, 200, 0),
'AWA2': (0, 0, 200, 0), 'FLO': (0, 0, 200, 0)}
else:
hyperparameters['samples_per_class'] = {'CUB': (0, 0, 200, 200), 'SUN': (0, 0, 200, 200),
'APY': (0, 0, 200, 200), 'AWA1': (0, 0, 200, 200),
'AWA2': (0, 0, 200, 200), 'FLO': (0, 0, 200, 200)}
model = Model( hyperparameters)
model.to(hyperparameters['device'])
"""
########################################
### load model where u left
########################################
saved_state = torch.load('./saved_models/CADA_trained.pth.tar')
model.load_state_dict(saved_state['state_dict'])
for d in model.all_data_sources_without_duplicates:
model.encoder[d].load_state_dict(saved_state['encoder'][d])
model.decoder[d].load_state_dict(saved_state['decoder'][d])
########################################
"""
losses = model.train_vae()
u,s,h,history = model.train_classifier()
if hyperparameters['generalized']==True:
acc = [hi[2] for hi in history]
elif hyperparameters['generalized']==False:
acc = [hi[1] for hi in history]
print(acc[-1])
state = {
'state_dict': model.state_dict() ,
'hyperparameters':hyperparameters,
'encoder':{},
'decoder':{}
}
for d in model.all_data_sources:
state['encoder'][d] = model.encoder[d].state_dict()
state['decoder'][d] = model.decoder[d].state_dict()
torch.save(state, 'CADA_trained.pth.tar')
print('>> saved')
| [
"torch.save"
] | 0.4.1 | sanixa/CADA-VAE-pytorch | 9383c3067ce84f351c72a285d6da5724dcd710a6 |
1.4 | import torch
import argparse
import os
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from auto_LiRPA import BoundedModule, BoundedTensor
from auto_LiRPA.perturbations import *
parser = argparse.ArgumentParser()
args, unknown = parser.parse_known_args()
class Flatten(nn.Module):
def __init__(self):
super(Flatten, self).__init__()
def forward(self, x):
return x.view((x.shape[0], -1))
class cnn_model(nn.Module):
def __init__(self, layers, padding, stride):
super(cnn_model, self).__init__()
self.module_list = []
channel = 1
length = 28
for i in range(layers):
self.module_list.append(nn.Conv2d(channel, 3, 4, stride = stride, padding = padding))
channel = 3
length = (length + 2 * padding - 4)//stride + 1
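            # the line above is the standard conv output-size formula
            # floor((L + 2*padding - kernel_size) / stride) + 1, with kernel_size = 4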
assert length > 0
self.module_list.append(nn.ReLU())
self.module_list.append(Flatten())
self.module_list.append(nn.Linear(3 * length * length, 256))
self.module_list.append(nn.Linear(256, 10))
self.model = nn.Sequential(*self.module_list)
def forward(self, x):
x = self.model(x)
return x
def test():
torch.manual_seed(1)
torch.cuda.manual_seed_all(1)
models = [2, 3]
paddings = [1, 2]
strides = [1, 3]
N = 2
n_classes = 10
image = torch.randn(N, 1, 28, 28)
image = image.to(torch.float32) / 255.0
for layer_num in models:
for padding in paddings:
for stride in strides:
# print(layer_num, padding, stride)
try:
model_ori = cnn_model(layer_num, padding, stride)
except:
continue
model = BoundedModule(model_ori, torch.empty_like(image), device="cpu", bound_opts={"conv_mode": "patches"})
eps = 0.3
norm = np.inf
ptb = PerturbationLpNorm(norm=norm, eps=eps)
image = BoundedTensor(image, ptb)
pred = model(image)
lb, ub = model.compute_bounds()
model = BoundedModule(model_ori, torch.empty_like(image), device="cpu", bound_opts={"conv_mode": "matrix"})
pred = model(image)
lb_ref, ub_ref = model.compute_bounds()
assert lb.shape == ub.shape == torch.Size((N, n_classes))
assert torch.allclose(lb, lb_ref)
assert torch.allclose(ub, ub_ref)
# print("passed")
if __name__ == '__main__':
test() | [
"torch.nn.Linear",
"torch.Size",
"torch.cuda.manual_seed_all",
"torch.nn.Sequential",
"torch.manual_seed",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.allclose",
"torch.randn",
"torch.empty_like"
] | 1.4 | Qiming-Wu/auto_LiRPA | 7e1fbf12d857ef8d411d80eef1bd73d9ae4ba3be |
1.9 | import settings
import pandas as pd
from loader.DWY_Neighbor import NeighborsLoader
from loader.DBP15k import DBP15kLoader
from script.preprocess.get_token import Token
from settings import *
import numpy as np
import torch
class NeighborToken(object):
def __init__(self, dbpToken, loader):
self.loader = loader
self.id_features_dict = dbpToken.id_features_dict
self.id_adj_tensor_dict = {}
def get_adj(valid_len):
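            # Build a star-shaped boolean adjacency over the fixed-size neighbor window:
            # self-loops for the first valid_len nodes plus edges between the centre
            # entity (index 0) and each of its neighbors.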
adj = torch.zeros(NEIGHBOR_SIZE, NEIGHBOR_SIZE).bool()
for i in range(0, valid_len):
adj[i, i] = 1
adj[0, i] = 1
adj[i, 0] = 1
return adj
def get_token():
id_neighbors_dict = {}
loader_id_neighbors_dict_copy = self.loader.id_neighbors_dict
for entity_id, neighbors_dict in loader_id_neighbors_dict_copy.items():
id_neighbors_dict[entity_id]=[]
id_neighbors_dict[entity_id].append(self.id_features_dict[entity_id])
for rel, neighbor in neighbors_dict.items():
for neigh in neighbor:
id_neighbors_dict[entity_id].append(self.id_features_dict[neigh])
for k, v in id_neighbors_dict.items():
if len(v) < NEIGHBOR_SIZE:
self.id_adj_tensor_dict[k] = get_adj(len(v))
id_neighbors_dict[k] = v + [[ord(' ')]*TOKEN_LEN] * (NEIGHBOR_SIZE - len(v))
else:
self.id_adj_tensor_dict[k] = get_adj(NEIGHBOR_SIZE)
id_neighbors_dict[k] = v[:NEIGHBOR_SIZE]
return id_neighbors_dict
self.id_neighbors_dict = get_token()
| [
"torch.zeros"
] | 1.9.0 | Yasuo-orphan/SelfKG | 52f71c186ab4ad2db8de6cadf4e498d6e563ee96 |
1.6 | import numpy as np
import torch
import torch.nn as nn
from ..loss import *
class Criterion(object):
def __init__(self, device=0, target_opt=['1'], loss_opt=[['WeightedBCE']], loss_weight=[[1.]], regu_opt=[], regu_weight=[]):
self.device = device
self.target_opt = target_opt
self.loss_opt = loss_opt
self.loss_weight = loss_weight
self.num_target = len(target_opt)
self.num_regu = len(regu_opt)
self.loss = self.get_loss()
self.loss_w = loss_weight
self.regu = self.get_regu(regu_opt)
self.regu_w = regu_weight
def get_regu(self, regu_opt=[]):
regu = None
if len(regu_opt)>0:
regu = [None]*len(regu_opt)
for i in range(len(regu_opt)):
if regu_opt[i] == 0:
regu[i] = nn.L1Loss()
elif regu_opt[i] == 'BinaryReg':
regu[i] = BinaryReg()
return regu
def get_loss(self):
out = [None]*self.num_target
for i in range(self.num_target):
out[i] = [None]*len(self.loss_opt[i])
for j,lopt in enumerate(self.loss_opt[i]):
if lopt == 'WeightedMSE':
out[i][j] = WeightedMSE()
elif lopt == 'WeightedBCE':
out[i][j] = WeightedBCE()
elif lopt == 'JaccardLoss':
out[i][j] = JaccardLoss()
elif lopt == 'DiceLoss':
out[i][j] = DiceLoss()
elif lopt == 'WeightedCE':
out[i][j] = WeightedCE()
else:
print('Unknown loss option {}'.format(lopt))
return out
def to_torch(self, data):
return torch.from_numpy(data).to(self.device)
def eval(self, pred, target, weight):
# target, weight: numpy
# pred: torch
# compute loss
loss = 0
cid = 0 # channel index for prediction
for i in range(self.num_target):
# for each target
numC = self.get_num_channel(i, target)
target_t = self.to_torch(target[i])
for j in range(len(self.loss[i])):
if weight[i][j].shape[-1] == 1: # placeholder for no weight
loss += self.loss_weight[i][j]*self.loss_w[i][j]*self.loss[i][j](pred[:,cid:cid+numC], target_t)
else:
loss += self.loss_weight[i][j]*self.loss_w[i][j]*self.loss[i][j](pred[:,cid:cid+numC], target_t, self.to_torch(weight[i][j]))
cid += numC
for i in range(self.num_regu):
loss += self.regu[i](pred)*self.regu_w[i]
return loss
def get_num_channel(self, i, target):
topt = self.target_opt[i]
        if topt[0] == '9': # generic semantic segmentation
numC = topt.split('-')[1]
numC = int(numC)
else:
numC = target[i].shape[1]
return numC
| [
"torch.nn.L1Loss",
"torch.from_numpy"
] | 1.6.0 | matinraayai/pytorch_connectomics | b11a2f7e71a8d1442fb05f7a6edfaaaa7b0d9205 |
1.8 | import json
import torch
from nltk.tokenize import word_tokenize
from graph4nlp.pytorch.data.dataset import Text2TextDataItem, Text2TextDataset
from graph4nlp.pytorch.modules.utils.padding_utils import pad_2d_vals, pad_2d_vals_no_size
class CNNDataset(Text2TextDataset):
def __init__(
self,
root_dir,
topology_subdir,
graph_name,
static_or_dynamic="static",
tokenizer=word_tokenize,
lower_case=True,
pretrained_word_emb_name="840B",
pretrained_word_emb_url=None,
target_pretrained_word_emb_name=None,
target_pretrained_word_emb_url=None,
pretrained_word_emb_cache_dir=".vector_cache/",
use_val_for_vocab=False,
seed=1234,
thread_number=4,
port=9000,
timeout=15000,
edge_strategy=None,
share_vocab=True,
word_emb_size=300,
dynamic_init_graph_name=None,
dynamic_init_topology_builder=None,
dynamic_init_topology_aux_args=None,
for_inference=False,
reused_vocab_model=None,
**kwargs
):
super(CNNDataset, self).__init__(
root_dir=root_dir,
topology_subdir=topology_subdir,
graph_name=graph_name,
static_or_dynamic=static_or_dynamic,
tokenizer=tokenizer,
lower_case=lower_case,
pretrained_word_emb_name=pretrained_word_emb_name,
pretrained_word_emb_url=pretrained_word_emb_url,
target_pretrained_word_emb_name=target_pretrained_word_emb_name,
target_pretrained_word_emb_url=target_pretrained_word_emb_url,
pretrained_word_emb_cache_dir=pretrained_word_emb_cache_dir,
use_val_for_vocab=use_val_for_vocab,
seed=seed,
thread_number=thread_number,
port=port,
timeout=timeout,
edge_strategy=edge_strategy,
share_vocab=share_vocab,
word_emb_size=word_emb_size,
dynamic_init_graph_name=dynamic_init_graph_name,
dynamic_init_topology_builder=dynamic_init_topology_builder,
dynamic_init_topology_aux_args=dynamic_init_topology_aux_args,
for_inference=for_inference,
reused_vocab_model=reused_vocab_model,
**kwargs
)
@property
def raw_file_names(self):
"""
3 reserved keys: 'train', 'val' (optional), 'test'.
        These keys represent the dataset splits.
"""
return {"train": "train_3w.json", "val": "val.json", "test": "test.json"}
@property
def processed_file_names(self):
"""At least 2 reserved keys should be fiiled: 'vocab' and 'data'."""
return {"vocab": "vocab.pt", "data": "data.pt"}
def download(self):
return
def parse_file(self, file_path):
"""
Read and parse the file specified by `file_path`. The file format is
specified by each individual task-specific base class. Returns all
the indices of data items in this file w.r.t. the whole dataset.
For Text2TextDataset, the format of the input file should contain
lines of input, each line representing one record of data. The input
        and output are separated by a tab (\t).
Parameters
----------
file_path: str
The path of the input file.
Returns
-------
list
The indices of data items in the file w.r.t. the whole dataset.
"""
data = []
with open(file_path, "r") as f:
examples = json.load(f)
for example_dict in examples:
input = " ".join(" ".join(example_dict["article"]).split()[:400]).lower()
output = " ".join(
" ".join(
["<t> " + sent[0] + " . </t>" for sent in example_dict["highlight"]]
).split()[:99]
).lower()
if input == "" or output == "":
continue
data_item = Text2TextDataItem(
input_text=input,
output_text=output,
tokenizer=self.tokenizer,
share_vocab=self.share_vocab,
)
data.append(data_item)
return data
@staticmethod
def collate_fn(data_list: [Text2TextDataItem]):
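        # Pad every graph's "token_id" node feature to the largest node count in the
        # batch, merge the graphs into a single batched graph, and pad the target token
        # sequences so they can be stacked into one tensor.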
graph_data = [item.graph for item in data_list]
max_node_len = 0
for graph_item in graph_data:
max_node_len = max(max_node_len, graph_item.node_features["token_id"].size()[1])
for graph_item in graph_data:
token_id_numpy = graph_item.node_features["token_id"].numpy()
token_id_pad = pad_2d_vals(token_id_numpy, token_id_numpy.shape[0], max_node_len)
graph_item.node_features["token_id"] = torch.from_numpy(token_id_pad).long()
from graph4nlp.pytorch.data.data import to_batch
big_graph = to_batch(graph_data)
output_numpy = [item.output_np for item in data_list]
output_str = [item.output_text.lower().strip() for item in data_list]
output_pad = pad_2d_vals_no_size(output_numpy)
tgt_seq = torch.from_numpy(output_pad).long()
return {"graph_data": big_graph, "tgt_seq": tgt_seq, "output_str": output_str}
| [
"torch.from_numpy"
] | 1.8.0 | cminusQAQ/graph4nlp | d980e897131f1b9d3766750c06316d94749904fa |
1.8 | import torch
CORENLP_TIMEOUT_SIGNATURE = "CoreNLP request timed out. Your document may be too long."
def convert_adj_to_graph(graph, adj, reverse_adj, mask_off_val):
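    # Rebuild the batched graph's edges from dense (batch, N, N) adjacency tensors:
    # every entry different from mask_off_val becomes an edge. The batch-prefix
    # arithmetic below offsets per-graph node indices into global node ids of the
    # batched graph, and the surviving adjacency values are stored as the
    # "edge_weight" / "reverse_edge_weight" edge features.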
slides = (adj != mask_off_val).nonzero(as_tuple=False)
batch_nodes_tensor = torch.Tensor([0] + graph._batch_num_nodes).to(slides.device)
batch_prefix = batch_nodes_tensor.view(-1, 1).expand(-1, batch_nodes_tensor.shape[0])
batch_prefix = batch_prefix.triu().long().sum(0)
src = slides[:, 1] + batch_prefix.index_select(dim=0, index=slides[:, 0])
tgt = slides[:, 2] + batch_prefix.index_select(dim=0, index=slides[:, 0])
graph_data = graph
graph_data.remove_all_edges() # remove all existing edges
graph_data.add_edges(src.detach().cpu().numpy().tolist(), tgt.detach().cpu().numpy().tolist())
value = adj[slides[:, 0], slides[:, 1], slides[:, 2]]
reverse_value = reverse_adj[slides[:, 0], slides[:, 1], slides[:, 2]]
graph_data.edge_features["edge_weight"] = value
graph_data.edge_features["reverse_edge_weight"] = reverse_value
return graph_data
| [
"torch.Tensor"
] | 1.8.0 | cminusQAQ/graph4nlp | d980e897131f1b9d3766750c06316d94749904fa |
1.8 | import torch
import torch.nn as nn
from .....data.data import from_batch
from ..base import PoolingBase
class MaxPooling(PoolingBase):
r"""Apply max pooling over the nodes in the graph.
.. math::
r^{(i)} = \max_{k=1}^{N_i}\left( x^{(i)}_k \right)
"""
def __init__(self, dim=None, use_linear_proj=False):
super(MaxPooling, self).__init__()
if use_linear_proj:
assert dim is not None, "dim should be specified when use_linear_proj is set to True"
self.linear = nn.Linear(dim, dim, bias=False)
else:
self.linear = None
def forward(self, graph, feat):
r"""Compute max pooling.
Parameters
----------
graph : GraphData
The graph data.
feat : str
The feature field name.
Returns
-------
torch.Tensor
The output feature.
"""
graph_list = from_batch(graph)
output_feat = []
for g in graph_list:
feat_tensor = g.node_features[feat]
if self.linear is not None:
feat_tensor = self.linear(feat_tensor)
output_feat.append(torch.max(feat_tensor, dim=0)[0])
output_feat = torch.stack(output_feat, 0)
return output_feat
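# Illustrative usage (assuming a batched GraphData `bg` whose node feature field
# "node_feat" has dimension 128):
#   pool = MaxPooling(dim=128, use_linear_proj=True)
#   graph_emb = pool(bg, "node_feat")  # -> tensor of shape (num_graphs, 128)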
| [
"torch.nn.Linear",
"torch.stack",
"torch.max"
] | 1.8.0 | cminusQAQ/graph4nlp | d980e897131f1b9d3766750c06316d94749904fa |
1.5 | import numpy as np
import torch
import torch.nn as nn
from torch.distributions import Normal
from rl_sandbox.constants import OBS_RMS, CPU
from rl_sandbox.model_architectures.actor_critics.actor_critic import ActorCritic
from rl_sandbox.model_architectures.shared import Conv2DEncoder, Flatten, Fuse, Split
from rl_sandbox.model_architectures.utils import construct_linear_layers
class EarlyFusionConv2DGaussianAC(ActorCritic):
def __init__(self,
img_dim,
scalar_feature_dim,
action_dim,
shared_layers,
shared_out_dim,
eps=1e-7,
device=torch.device(CPU),
normalize_obs=False,
normalize_value=False):
assert len(img_dim) == 4
super().__init__(obs_dim=scalar_feature_dim,
norm_dim=(0,),
device=device,
normalize_obs=normalize_obs,
normalize_value=normalize_value)
self._eps = eps
self._img_dim = img_dim
self._scalar_feature_dim = scalar_feature_dim
self.split = Split([int(np.product(img_dim)), scalar_feature_dim])
self.fuse = Fuse()
self.encoder = Conv2DEncoder(*img_dim[1:], shared_out_dim, shared_layers, nn.LayerNorm(50))
self.action_net = nn.Sequential(nn.Linear(shared_out_dim * self._img_dim[0] + scalar_feature_dim, 256),
nn.ReLU(),
nn.Linear(256, action_dim * 2))
self.value = nn.Sequential(nn.Linear(shared_out_dim * self._img_dim[0] + scalar_feature_dim, 256),
nn.ReLU(),
nn.Linear(256, 1))
self.to(self.device)
def forward(self, x, h, **kwargs):
batch_size = x.shape[0]
if self._scalar_feature_dim > 0:
(imgs, scalars) = self.split(x)
if hasattr(self, OBS_RMS):
scalars = self.obs_rms.normalize(scalars)
else:
imgs = x
scalars = torch.empty(batch_size, 0, device=self.device)
imgs = imgs.reshape(batch_size * self._img_dim[0], *self._img_dim[1:]).to(self.device)
z = self.encoder(imgs)
x = self.fuse((z.reshape(batch_size, -1), scalars.to(self.device)))
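        # The policy head outputs 2 * action_dim values; split them into the Gaussian
        # mean and a pre-activation std, with softplus (+ eps) keeping the std positive.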
a_mean, a_log_std = torch.chunk(self.action_net(x), chunks=2, dim=1)
a_std = torch.nn.functional.softplus(a_log_std) + self._eps
dist = Normal(a_mean, a_std)
val = self.value(x)
return dist, val, h
| [
"torch.nn.Linear",
"torch.device",
"torch.nn.LayerNorm",
"torch.nn.functional.softplus",
"torch.distributions.Normal",
"torch.nn.ReLU",
"torch.empty"
] | 1.5.1 | chanb/rl_sandbox_public | e55f954a29880f83a5b0c3358badda4d900f1564 |
1.5 | import argparse
import numpy as np
import torch
import rl_sandbox.constants as c
import rl_sandbox.transforms.general_transforms as gt
from rl_sandbox.agents.random_agents import UniformContinuousAgent
from rl_sandbox.buffers.wrappers.torch_buffer import TorchBuffer
from rl_sandbox.envs.wrappers.action_repeat import ActionRepeatWrapper
from rl_sandbox.envs.wrappers.augment_action import AugmentActionWrapper
from rl_sandbox.envs.wrappers.frame_stack import FrameStackWrapper
from rl_sandbox.envs.wrappers.renderer import GymRenderer
from rl_sandbox.train.train_sac import train_sac
from rl_sandbox.model_architectures.actor_critics.fully_connected_soft_actor_critic import LSTMSquashedGaussianSAC
from rl_sandbox.model_architectures.layers_definition import VALUE_BASED_LINEAR_LAYERS
# This is for script run
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, required=True, help="Random seed")
args = parser.parse_args()
seed = args.seed
obs_dim = 15
action_dim = 3
min_action = -np.ones(action_dim)
max_action = np.ones(action_dim)
device = torch.device("cuda:0")
# device = torch.device(c.CPU)
action_repeat = 1
num_frames = 1
hidden_state_dim = 128
memory_size = 1000000
max_total_steps = 1000000
experiment_setting = {
# Auxiliary Tasks
c.AUXILIARY_TASKS: {},
# Buffer
c.BUFFER_PREPROCESSING: gt.AsType(),
c.BUFFER_SETTING: {
c.KWARGS: {
c.MEMORY_SIZE: memory_size,
c.OBS_DIM: (obs_dim,),
c.H_STATE_DIM: (hidden_state_dim * 2,),
c.ACTION_DIM: (action_dim,),
c.REWARD_DIM: (1,),
c.INFOS: {c.MEAN: ((action_dim,), np.float32),
c.VARIANCE: ((action_dim,), np.float32),
c.ENTROPY: ((action_dim,), np.float32),
c.LOG_PROB: ((1,), np.float32),
c.VALUE: ((1,), np.float32),
c.DISCOUNTING: ((1,), np.float32)},
c.BURN_IN_WINDOW: 19,
c.PADDING_FIRST: True,
c.CHECKPOINT_INTERVAL: 0,
c.CHECKPOINT_PATH: None,
},
c.STORAGE_TYPE: c.RAM,
c.BUFFER_WRAPPERS: [
{
c.WRAPPER: TorchBuffer,
c.KWARGS: {},
},
],
},
# Environment
c.ACTION_DIM: action_dim,
c.CLIP_ACTION: True,
c.ENV_SETTING: {
c.ENV_BASE: {
# c.ENV_NAME: "Hopper-v2"
c.ENV_NAME: "HopperBulletEnv-v0"
},
c.ENV_TYPE: c.GYM,
c.ENV_WRAPPERS: [
{
c.WRAPPER: GymRenderer,
c.KWARGS: {},
},
# {
# c.WRAPPER: AugmentActionWrapper,
# c.KWARGS: {
# c.ACTION_DIM: action_dim,
# }
# },
{
c.WRAPPER: ActionRepeatWrapper,
c.KWARGS: {
c.ACTION_REPEAT: action_repeat,
c.DISCOUNT_FACTOR: 1.,
c.ENABLE_DISCOUNTING: False,
}
},
{
c.WRAPPER: FrameStackWrapper,
c.KWARGS: {
c.NUM_FRAMES: num_frames,
}
}
]
},
c.MIN_ACTION: min_action,
c.MAX_ACTION: max_action,
c.OBS_DIM: obs_dim,
# Evaluation
c.EVALUATION_FREQUENCY: 5000,
c.EVALUATION_RENDER: False,
c.EVALUATION_RETURNS: [],
c.NUM_EVALUATION_EPISODES: 5,
# Exploration
c.EXPLORATION_STEPS: 1000,
c.EXPLORATION_STRATEGY: UniformContinuousAgent(min_action,
max_action,
np.random.RandomState(seed)),
# General
c.DEVICE: device,
c.SEED: seed,
# Load
c.LOAD_MODEL: False,
# Logging
c.PRINT_INTERVAL: 5000,
c.SAVE_INTERVAL: 50000,
c.LOG_INTERVAL: 1,
# Model
c.MODEL_SETTING: {
c.MODEL_ARCHITECTURE: LSTMSquashedGaussianSAC,
c.KWARGS: {
c.OBS_DIM: obs_dim,
c.HIDDEN_STATE_DIM: hidden_state_dim,
c.ACTION_DIM: action_dim,
c.SHARED_LAYERS: VALUE_BASED_LINEAR_LAYERS(in_dim=obs_dim),
c.INITIAL_ALPHA: 1.,
c.DEVICE: device,
c.NORMALIZE_OBS: False,
c.NORMALIZE_VALUE: False,
},
},
c.OPTIMIZER_SETTING: {
c.POLICY: {
c.OPTIMIZER: torch.optim.Adam,
c.KWARGS: {
c.LR: 3e-4,
},
},
c.QS: {
c.OPTIMIZER: torch.optim.Adam,
c.KWARGS: {
c.LR: 3e-4,
},
},
c.ALPHA: {
c.OPTIMIZER: torch.optim.Adam,
c.KWARGS: {
c.LR: 3e-4,
},
},
},
c.EVALUATION_PREPROCESSING: gt.Identity(),
c.TRAIN_PREPROCESSING: gt.Identity(),
# SAC
c.ACCUM_NUM_GRAD: 1,
c.ACTOR_UPDATE_INTERVAL: 1,
c.BATCH_SIZE: 256,
c.BUFFER_WARMUP: 1000,
c.GAMMA: 0.99,
c.LEARN_ALPHA: True,
c.MAX_GRAD_NORM: 1e10,
c.NUM_GRADIENT_UPDATES: 1,
c.NUM_PREFETCH: 1,
c.REWARD_SCALING: 1.,
c.STEPS_BETWEEN_UPDATE: 1,
c.TARGET_ENTROPY: -action_dim,
c.TARGET_UPDATE_INTERVAL: 5000,
c.TAU: 1.,
c.UPDATE_NUM: 0,
# Progress Tracking
c.CUM_EPISODE_LENGTHS: [0],
c.CURR_EPISODE: 1,
c.NUM_UPDATES: 0,
c.RETURNS: [],
# Save
c.SAVE_PATH: f"../results/pybullet/hopper/gt-sac-fr-lstm-reg_q_targ/{seed}",
# c.SAVE_PATH: None,
# train parameters
c.MAX_TOTAL_STEPS: max_total_steps,
c.TRAIN_RENDER: False,
}
train_sac(experiment_config=experiment_setting)
| [
"torch.device"
] | 1.5.1 | chanb/rl_sandbox_public | e55f954a29880f83a5b0c3358badda4d900f1564 |
1.5 | import numpy as np
import torch
import rl_sandbox.constants as c
import rl_sandbox.transforms.general_transforms as gt
from rl_sandbox.agents.random_agents import UniformContinuousAgent
from rl_sandbox.buffers.wrappers.torch_buffer import TorchBuffer
from rl_sandbox.envs.wrappers.absorbing_state import AbsorbingStateWrapper
from rl_sandbox.envs.wrappers.action_repeat import ActionRepeatWrapper
from rl_sandbox.envs.wrappers.frame_stack import FrameStackWrapper
from rl_sandbox.train.train_dac_sac import train_dac_sac
from rl_sandbox.model_architectures.actor_critics.fully_connected_soft_actor_critic import FullyConnectedSeparate, FullyConnectedSquashedGaussianSAC
from rl_sandbox.model_architectures.discriminators.fully_connected_discriminators import ActionConditionedFullyConnectedDiscriminator
from rl_sandbox.model_architectures.layers_definition import VALUE_BASED_LINEAR_LAYERS, SAC_DISCRIMINATOR_LINEAR_LAYERS
seed = 1
obs_dim = 12
action_dim = 3
min_action = -np.ones(action_dim)
max_action = np.ones(action_dim)
device = torch.device("cuda:0")
# device = torch.device(c.CPU)
action_repeat = 1
num_frames = 1
memory_size = max_total_steps = 1000000 // action_repeat
experiment_setting = {
# Auxiliary Tasks
c.AUXILIARY_TASKS: {},
# Buffer
c.BUFFER_PREPROCESSING: gt.AsType(),
c.BUFFER_SETTING: {
c.KWARGS: {
c.MEMORY_SIZE: memory_size,
c.OBS_DIM: (obs_dim,),
c.H_STATE_DIM: (1,),
c.ACTION_DIM: (action_dim,),
c.REWARD_DIM: (1,),
c.INFOS: {c.MEAN: ((action_dim,), np.float32),
c.VARIANCE: ((action_dim,), np.float32),
c.ENTROPY: ((action_dim,), np.float32),
c.LOG_PROB: ((1,), np.float32),
c.VALUE: ((1,), np.float32),
c.DISCOUNTING: ((1,), np.float32)},
c.CHECKPOINT_INTERVAL: 0,
c.CHECKPOINT_PATH: None,
},
c.STORAGE_TYPE: c.RAM,
c.STORE_NEXT_OBSERVATION: True,
c.BUFFER_WRAPPERS: [
{
c.WRAPPER: TorchBuffer,
c.KWARGS: {},
},
],
},
# Environment
c.ACTION_DIM: action_dim,
c.CLIP_ACTION: True,
c.ENV_SETTING: {
c.ENV_BASE: {
c.ENV_NAME: "Hopper-v2"
},
c.ENV_TYPE: c.GYM,
c.ENV_WRAPPERS: [
{
c.WRAPPER: AbsorbingStateWrapper,
c.KWARGS: {
c.CREATE_ABSORBING_STATE: True,
c.MAX_EPISODE_LENGTH: 1000,
}
},
{
c.WRAPPER: ActionRepeatWrapper,
c.KWARGS: {
c.ACTION_REPEAT: action_repeat,
c.DISCOUNT_FACTOR: 1.,
c.ENABLE_DISCOUNTING: False,
}
},
{
c.WRAPPER: FrameStackWrapper,
c.KWARGS: {
c.NUM_FRAMES: num_frames,
}
}
]
},
c.MIN_ACTION: min_action,
c.MAX_ACTION: max_action,
c.OBS_DIM: obs_dim,
# Evaluation
c.EVALUATION_FREQUENCY: 5000,
c.EVALUATION_RENDER: False,
c.EVALUATION_RETURNS: [],
c.NUM_EVALUATION_EPISODES: 10,
# Exploration
c.EXPLORATION_STEPS: 1000,
c.EXPLORATION_STRATEGY: UniformContinuousAgent(min_action,
max_action,
np.random.RandomState(seed)),
# General
c.DEVICE: device,
c.SEED: seed,
# Load
c.LOAD_MODEL: False,
# Logging
c.PRINT_INTERVAL: 5000,
c.SAVE_INTERVAL: 500000,
# Model
c.MODEL_SETTING: {
c.MODEL_ARCHITECTURE: FullyConnectedSeparate,
c.KWARGS: {
c.OBS_DIM: obs_dim,
c.ACTION_DIM: action_dim,
c.SHARED_LAYERS: VALUE_BASED_LINEAR_LAYERS(in_dim=obs_dim),
c.INITIAL_ALPHA: 1.,
c.DEVICE: device,
c.NORMALIZE_OBS: False,
c.NORMALIZE_VALUE: False,
},
},
c.OPTIMIZER_SETTING: {
c.POLICY: {
c.OPTIMIZER: torch.optim.Adam,
c.KWARGS: {
c.LR: 1e-5,
},
},
c.QS: {
c.OPTIMIZER: torch.optim.Adam,
c.KWARGS: {
c.LR: 1e-3,
},
},
c.ALPHA: {
c.OPTIMIZER: torch.optim.Adam,
c.KWARGS: {
c.LR: 1e-3,
},
},
c.DISCRIMINATOR: {
c.OPTIMIZER: torch.optim.Adam,
c.KWARGS: {
c.LR: 1e-3,
},
},
},
# DAC
c.EXPERT_BUFFER: "./expert_buffers/mujoco/hopper-v2/gt-sac-separate-fix.pkl",
c.DISCRIMINATOR_SETTING: {
c.MODEL_ARCHITECTURE: ActionConditionedFullyConnectedDiscriminator,
c.KWARGS: {
c.OBS_DIM: obs_dim,
c.ACTION_DIM: action_dim,
c.OUTPUT_DIM: 1,
c.LAYERS: SAC_DISCRIMINATOR_LINEAR_LAYERS(in_dim=obs_dim + action_dim),
c.DEVICE: device,
}
},
c.DISCRIMINATOR_BATCH_SIZE: 256,
c.GRADIENT_PENALTY_LAMBDA: 10.,
# SAC
c.ACCUM_NUM_GRAD: 1,
c.BATCH_SIZE: 256,
c.BUFFER_WARMUP: 1000,
c.EVALUATION_PREPROCESSING: gt.Identity(),
c.GAMMA: 0.99,
c.LEARN_ALPHA: True,
c.MAX_GRAD_NORM: 10,
c.NUM_GRADIENT_UPDATES: 1,
c.NUM_PREFETCH: 1,
c.REWARD_SCALING: 1.,
c.STEPS_BETWEEN_UPDATE: 1,
c.TARGET_ENTROPY: -3.,
c.TARGET_UPDATE_INTERVAL: 1,
c.TAU: 0.005,
c.TRAIN_PREPROCESSING: gt.Identity(),
c.UPDATE_NUM: 0,
# Progress Tracking
c.CUM_EPISODE_LENGTHS: [0],
c.CURR_EPISODE: 1,
c.NUM_UPDATES: 0,
c.RETURNS: [],
# Save
c.SAVE_PATH: "./results/hopper-v2/dac/gt-sac-separate-next_obs",
# c.SAVE_PATH: None,
# train parameters
c.MAX_TOTAL_STEPS: max_total_steps,
c.TRAIN_RENDER: False,
}
train_dac_sac(experiment_config=experiment_setting)
| [
"torch.device"
] | 1.5.1 | chanb/rl_sandbox_public | e55f954a29880f83a5b0c3358badda4d900f1564 |
1.3 | import os
import torch
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)  # topk returns (values, indices); keep the indices
    pred = pred.t()  # transpose: (batch_size, maxk) -> (maxk, batch_size)
    correct = pred.eq(target.view(1, -1).expand_as(pred))  # target: (batch_size,) -> (1, batch_size), broadcast against pred
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
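# Illustrative call: for logits of shape (batch, num_classes) and integer targets of
# shape (batch,), accuracy(output, target, topk=(1, 5)) returns the top-1 and top-5
# precision of the batch as percentages.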
def adjust_learning_rate(optimizer, epoch, learning_rate, end_epoch):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
if epoch in [round(end_epoch * 0.333), round(end_epoch * 0.666)]:
for param_group in optimizer.param_groups:
param_group['lr'] *= 0.2
learning_rate = learning_rate* 0.2
print('Adjust_learning_rate ' + str(epoch))
print('New_LearningRate: {}'.format(learning_rate))
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def save_checkpoint(state, at_type=''):
if not os.path.exists('./model'):
os.makedirs('./model')
epoch = state['epoch']
save_dir = './model/'+at_type+'_' + str(epoch) + '_' + str(round(float(state['prec1']), 4))
torch.save(state, save_dir)
print(save_dir)
| [
"torch.save"
] | 1.3.0 | bryant1410/Emotion-FAN | 8a4ea4f0eacced38e8f4c50ad37515e84c781ab8 |
1.2 | import math
from collections import Counter
from pathlib import Path
from typing import List, Dict, Any, Tuple
import torch
import tqdm
from torch.optim import Adam
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.tensorboard import SummaryWriter
from dp.model.model import Model
from dp.model.utils import _trim_util_stop
from dp.preprocessing.text import Preprocessor
from dp.training.dataset import new_dataloader
from dp.training.decorators import ignore_exception
from dp.training.losses import CrossEntropyLoss, CTCLoss
from dp.training.evaluation import evaluate_samples
from dp.utils.io import to_device, unpickle_binary
class Trainer:
""" Performs model training. """
def __init__(self, checkpoint_dir: Path, loss_type='ctc') -> None:
"""
Initializes a Trainer object.
Args:
checkpoint_dir (Path): Directory to store the model checkpoints.
loss_type (str): Type of loss: 'ctc' for forward transformer models
and 'cross_entropy' for autoregressive models.
"""
self.checkpoint_dir = checkpoint_dir
self.checkpoint_dir.mkdir(parents=True, exist_ok=True)
self.writer = SummaryWriter(log_dir=str(self.checkpoint_dir / 'logs'))
self.loss_type = loss_type
if loss_type == 'ctc':
self.criterion = CTCLoss()
elif loss_type == 'cross_entropy':
self.criterion = CrossEntropyLoss()
else:
raise ValueError(f'Loss not supported: {loss_type}')
def train(self,
model: Model,
checkpoint: Dict[str, Any],
store_phoneme_dict_in_model: bool = True) -> None:
"""
Performs training of a transformer model.
Args:
model (Model): Model to be trained (can be a fresh model or restored from a checkpoint).
checkpoint (Dict[str, Any]): Dictionary with entries 'optimizer': optimizer state dict,
'preprocessor': Preprocessor and 'config': Dict.
store_phoneme_dict_in_model (bool): Whether to store a dictionary of word-phoneme mappings
in the model checkpoint so that it can be automatically
loaded by a Phonemizer object.
Returns:
None: the checkpoints will be stored in a folder provided when instantiating a Trainer.
"""
config = checkpoint['config']
data_dir = Path(config['paths']['data_dir'])
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
model = model.to(device)
model.train()
criterion = self.criterion.to(device)
optimizer = Adam(model.parameters())
if 'optimizer' in checkpoint:
optimizer.load_state_dict(checkpoint['optimizer'])
for g in optimizer.param_groups:
g['lr'] = config['training']['learning_rate']
train_loader = new_dataloader(dataset_file=data_dir / 'train_dataset.pkl',
drop_last=True, batch_size=config['training']['batch_size'])
val_loader = new_dataloader(dataset_file=data_dir / 'val_dataset.pkl',
drop_last=False, batch_size=config['training']['batch_size_val'])
if store_phoneme_dict_in_model:
phoneme_dict = unpickle_binary(data_dir / 'phoneme_dict.pkl')
checkpoint['phoneme_dict'] = phoneme_dict
val_batches = sorted([b for b in val_loader], key=lambda x: -x['text_len'][0])
scheduler = ReduceLROnPlateau(optimizer,
factor=config['training']['scheduler_plateau_factor'],
patience=config['training']['scheduler_plateau_patience'],
mode='min')
losses = []
best_per = math.inf
if 'step' not in checkpoint:
checkpoint['step'] = 0
start_epoch = checkpoint['step'] // len(train_loader)
for epoch in range(start_epoch + 1, config['training']['epochs'] + 1):
pbar = tqdm.tqdm(enumerate(train_loader, 1), total=len(train_loader))
for i, batch in pbar:
checkpoint['step'] += 1
step = checkpoint['step']
self._set_warmup_lr(optimizer=optimizer, step=step,
config=config)
batch = to_device(batch, device)
avg_loss = sum(losses) / len(losses) if len(losses) > 0 else math.inf
pbar.set_description(desc=f'Epoch: {epoch} | Step {step} '
f'| Loss: {avg_loss:#.4}', refresh=True)
pred = model(batch)
loss = criterion(pred, batch)
if not (torch.isnan(loss) or torch.isinf(loss)):
optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
losses.append(loss.item())
self.writer.add_scalar('Loss/train', loss.item(), global_step=step)
self.writer.add_scalar('Params/batch_size', config['training']['batch_size'],
global_step=step)
self.writer.add_scalar('Params/learning_rate', [g['lr'] for g in optimizer.param_groups][0],
global_step=step)
if step % config['training']['validate_steps'] == 0:
val_loss = self._validate(model, val_batches)
self.writer.add_scalar('Loss/val', val_loss, global_step=step)
if step % config['training']['generate_steps'] == 0:
lang_samples = self._generate_samples(model=model,
preprocessor=checkpoint['preprocessor'],
val_batches=val_batches)
eval_result = evaluate_samples(lang_samples=lang_samples)
self._write_summaries(lang_samples=lang_samples,
eval_result=eval_result,
n_generate_samples=config['training']['n_generate_samples'],
step=step)
if eval_result['mean_per'] is not None and eval_result['mean_per'] < best_per:
self._save_model(model=model, optimizer=optimizer, checkpoint=checkpoint,
path=self.checkpoint_dir / f'best_model.pt')
self._save_model(model=model, optimizer=None, checkpoint=checkpoint,
path=self.checkpoint_dir / f'best_model_no_optim.pt')
scheduler.step(eval_result['mean_per'])
if step % config['training']['checkpoint_steps'] == 0:
step = step // 1000
self._save_model(model=model, optimizer=optimizer, checkpoint=checkpoint,
path=self.checkpoint_dir / f'model_step_{step}k.pt')
losses = []
self._save_model(model=model, optimizer=optimizer, checkpoint=checkpoint,
path=self.checkpoint_dir / 'latest_model.pt')
def _validate(self, model: Model, val_batches: List[dict]) -> float:
device = next(model.parameters()).device
criterion = self.criterion.to(device)
model.eval()
val_losses = []
for batch in val_batches:
batch = to_device(batch, device)
with torch.no_grad():
pred = model(batch)
loss = criterion(pred, batch)
if not (torch.isnan(loss) or torch.isinf(loss)):
val_losses.append(loss.item())
model.train()
return sum(val_losses) / len(val_losses)
@ignore_exception
def _generate_samples(self,
model: Model,
preprocessor: Preprocessor,
val_batches: List[dict]) -> Dict[str, List[Tuple[List[str], List[str], List[str]]]]:
""" Returns a dictionary with entries lang: Tuple of (word, generated, target) """
device = next(model.parameters()).device
model.eval()
text_tokenizer = preprocessor.text_tokenizer
phoneme_tokenizer = preprocessor.phoneme_tokenizer
lang_tokenizer = preprocessor.lang_tokenizer
lang_prediction_result = dict()
for batch in val_batches:
batch = to_device(batch, device)
generated_batch, _ = model.generate(batch)
for i in range(batch['text'].size(0)):
text_len = batch['text_len'][i]
text = batch['text'][i, :text_len]
target = batch['phonemes'][i, :]
lang = batch['language'][i]
lang = lang_tokenizer.decode(lang.detach().cpu().item())
generated = generated_batch[i, :].cpu()
generated = _trim_util_stop(generated, phoneme_tokenizer.end_index)
text, target = text.detach().cpu(), target.detach().cpu()
text = text_tokenizer.decode(text, remove_special_tokens=True)
generated = phoneme_tokenizer.decode(generated, remove_special_tokens=True)
target = phoneme_tokenizer.decode(target, remove_special_tokens=True)
lang_prediction_result[lang] = lang_prediction_result.get(lang, []) + [(text, generated, target)]
model.train()
return lang_prediction_result
@ignore_exception
def _write_summaries(self,
lang_samples: Dict[str, List[Tuple[List[str], List[str], List[str]]]],
eval_result: Dict[str, Any],
n_generate_samples: int,
step: int) -> None:
self.writer.add_scalar(f'Phoneme_Error_Rate/mean',
eval_result['mean_per'], global_step=step)
self.writer.add_scalar(f'Word_Error_Rate/mean',
eval_result['mean_wer'], global_step=step)
for lang in lang_samples.keys():
result = eval_result[lang]
self.writer.add_scalar(f'Phoneme_Error_Rate/{lang}',
result['per'], global_step=step)
self.writer.add_scalar(f'Word_Error_Rate/{lang}',
result['wer'], global_step=step)
for lang, samples in lang_samples.items():
samples = [(''.join(w), ''.join(p), ''.join(t)) for w, p, t in samples]
word_counts = Counter([word for word, _, _ in samples])
samples_dedup = [(w, p, t) for w, p, t in samples if word_counts[w] == 1]
log_texts = dict()
for word, pred, target in samples_dedup:
log_texts[word] = f' {word:<30} {pred:<30} {target:<30}'
log_text_items = sorted(log_texts.items(), key=lambda x: -len(x[0]))
log_text_list = [v for k, v in log_text_items]
log_text = '\n'.join(log_text_list[:n_generate_samples])
self.writer.add_text(f'{lang}/text_prediction_target', log_text, global_step=step)
def _save_model(self,
model: torch.nn.Module,
optimizer: torch.optim,
checkpoint: Dict[str, Any],
path: Path) -> None:
checkpoint['model'] = model.state_dict()
if optimizer is not None:
checkpoint['optimizer'] = optimizer.state_dict()
else:
checkpoint['optimizer'] = None
torch.save(checkpoint, str(path))
def _set_warmup_lr(self,
optimizer: torch.optim,
step: int,
config: Dict[str, Any]) -> None:
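        # Linear warm-up: scale the configured learning rate by step / warmup_steps
        # during the first warmup_steps optimizer steps.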
warmup_steps = config['training']['warmup_steps']
if warmup_steps > 0 and step <= warmup_steps:
warmup_factor = 1.0 - max(warmup_steps - step, 0) / warmup_steps
for g in optimizer.param_groups:
g['lr'] = config['training']['learning_rate'] * warmup_factor
| [
"torch.device",
"torch.isnan",
"torch.no_grad",
"torch.cuda.is_available",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.isinf"
] | 1.2.0 | ishine/DeepPhonemizer | b8f170764c7648fe2acb552b787099ab4f941e58 |
0.4 | import torchvision.transforms as transforms
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch
cuda = True if torch.cuda.is_available() else False
Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
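# NOTE (assumption): SLayer, UpperDiagonalThresholdedLogTransform, pers_dgm_center_init
# and reduce_essential_dgm are not imported in this file; they are assumed to come from
# the topological-signature utilities used elsewhere in this project and must be
# importable for this module to run.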
class Topo_Generator(nn.Module):
def __init__(self, latent_dim, img_size, channels, subscripted_views):
        super(Topo_Generator, self).__init__()
self.latent_dim = latent_dim
self.img_size = img_size
self.channels = channels
self.subscripted_views = subscripted_views
self.transform = UpperDiagonalThresholdedLogTransform(0.1)
# self.model = nn.Sequential(
# nn.Linear(self.latent_dim, 128),
# nn.LeakyReLU(0.2, inplace=True),
# nn.Linear(128, 256),
# nn.BatchNorm1d(256),
# nn.LeakyReLU(0.2, inplace=True),
# nn.Linear(256, 512),
# nn.BatchNorm1d(512),
# nn.LeakyReLU(0.2, inplace=True),
# nn.Linear(512, 1024),
# nn.BatchNorm1d(1024),
# nn.LeakyReLU(0.2, inplace=True),
# nn.Linear(1024, self.img_size**2),
# nn.Tanh()
# )
def get_init(n_elements):
transform = UpperDiagonalThresholdedLogTransform(0.1)
return transform(pers_dgm_center_init(n_elements))
self.dim_0 = SLayer(150, 2, get_init(150), torch.ones(150, 2) * 3)
self.dim_0_ess = SLayer(50, 1)
self.dim_1_ess = SLayer(50, 1)
self.slayers = [self.dim_0,
self.dim_0_ess,
self.dim_1_ess
]
self.stage_1 = []
stage_1_outs = [75, 25, 25]
for i, (n_in, n_out) in enumerate(zip([150, 50, 50], stage_1_outs)):
seq = nn.Sequential()
seq.add_module('linear_1', nn.Linear(n_in, n_out))
seq.add_module('batch_norm', nn.BatchNorm1d(n_out))
seq.add_module('drop_out_1', nn.Dropout(0.1))
seq.add_module('linear_2', nn.Linear(n_out, n_out))
seq.add_module('relu', nn.ReLU())
seq.add_module('drop_out_2', nn.Dropout(0.1))
self.stage_1.append(seq)
self.add_module('stage_1_{}'.format(i), seq)
linear_1 = nn.Sequential()
linear_1.add_module('linear_1', nn.Linear(sum(stage_1_outs), 200))
linear_1.add_module('batchnorm_1', torch.nn.BatchNorm1d(200))
linear_1.add_module('relu_1', nn.ReLU())
linear_1.add_module('linear_2', nn.Linear(200, 100))
linear_1.add_module('batchnorm_2', torch.nn.BatchNorm1d(100))
linear_1.add_module('drop_out_2', torch.nn.Dropout(0.1))
linear_1.add_module('relu_2', nn.ReLU())
linear_1.add_module('linear_3', nn.Linear(100, 50))
linear_1.add_module('batchnorm_3', nn.BatchNorm1d(50))
linear_1.add_module('relu_3', nn.ReLU())
linear_1.add_module('linear_4', nn.Linear(50, 5))
linear_1.add_module('batchnorm_4', nn.BatchNorm1d(5))
self.linear_1 = linear_1
def forward(self, batch):  # the input is the batched persistence-diagram views, not a noise vector
# import pdb;
# img = self.model(noise)
# # pdb.set_trace(im)
# img = img.view(img.size()[0], self.channels, self.img_size, self.img_size)
#
# return img
x = [batch[n] for n in self.subscripted_views]
x = [
[self.transform(dgm) for dgm in x[0]],
[reduce_essential_dgm(dgm) for dgm in x[1]],
[reduce_essential_dgm(dgm) for dgm in x[2]]
]
x_sl = [l(xx) for l, xx in zip(self.slayers, x)]
x = [l(xx) for l, xx in zip(self.stage_1, x_sl)]
x = torch.cat(x, 1)
x = self.linear_1(x)
return x
class Discriminator(nn.Module):
def __init__(self, img_size, latent_dim):
super(Discriminator, self).__init__()
self.img_size = img_size
self.latent_dim = latent_dim
self.model = nn.Sequential(
nn.Linear(self.img_size**2 + self.latent_dim, 512),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(512, 256),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(256, 1),
nn.Sigmoid()
)
def forward(self, img, latent_vector):
img_flat = img.view(img.size()[0], -1)
validity = self.model(torch.cat([img_flat, latent_vector],1))
return validity
class Topo_Decoder(nn.Module):
def __init__(self, img_size, latent_dim):
super(Topo_Decoder, self).__init__()
self.img_size = img_size
self.latent_dim = latent_dim
self.model = nn.Sequential(
nn.Linear(self.img_size**2, 512),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(512, 256),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(256, self.latent_dim),
nn.Sigmoid()
)
def forward(self, img):
# import pdb; pdb.set_trace()
img_flat = img.view(img.size()[0], -1)
validity = self.model(img_flat) #64x784
return validity
def train_discriminator(discriminator, imgs, latent_vector):
# imgs = imgs.view(imgs.size()[0], -1)
# vector = torch.cat([imgs, latent_vector], 1)
# return discriminator(vector)
return discriminator(imgs, latent_vector)
def get_loss_discriminator(discriminator, fake_imgs, z, real_imgs, fake_z):
adversarial_loss = nn.BCELoss()
# minibatch_size = discriminator_real.size()[0]
minibatch_size = real_imgs.size()[0]
valid = Variable(Tensor(minibatch_size, 1).fill_(1.0), requires_grad=False)
fake = Variable(Tensor(minibatch_size, 1).fill_(0.0), requires_grad=False)
real_loss = adversarial_loss(train_discriminator(discriminator, real_imgs, fake_z), valid)
fake_loss = adversarial_loss(train_discriminator(discriminator, fake_imgs.detach(), z), fake)
return (real_loss + fake_loss) / 2
def get_loss_generator(discriminator, fake_imgs, z, real_imgs, fake_z):
objection = nn.BCELoss()
minibatch_size = fake_imgs.size()[0]
# minibatch_size = self.batch_size
valid = Variable(Tensor(minibatch_size, 1).fill_(1.0), requires_grad=False)
valid_prediction = train_discriminator(discriminator, fake_imgs, z)
# import pdb; pdb.set_trace()
return objection(valid_prediction, valid)
def get_loss_wasserstein_discriminator(discriminator, fake_imgs, z, real_imgs, fake_z):
real_validity = discriminator(real_imgs, fake_z)
fake_validity = discriminator(fake_imgs, z)
return real_validity - fake_validity
def get_loss_wasserstein_generator(discriminator, fake_imgs, z, real_imgs, fake_z):
return torch.mean(discriminator(fake_imgs, z))
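# Added usage sketch (hedged; the names below are hypothetical and not defined in
# this file): one adversarial step with the loss helpers above could look like
#   z = Tensor(real_imgs.size(0), latent_dim).normal_()    # prior sample
#   fake_imgs = generator(batch)                           # generated images
#   fake_z = decoder(real_imgs)                            # inferred latent code
#   d_loss = get_loss_discriminator(discriminator, fake_imgs, z, real_imgs, fake_z)
#   opt_d.zero_grad(); d_loss.backward(); opt_d.step()
#   g_loss = get_loss_generator(discriminator, fake_imgs, z, real_imgs, fake_z)
#   opt_g.zero_grad(); g_loss.backward(); opt_g.step()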
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.Dropout",
"torch.nn.Sigmoid",
"torch.nn.Sequential",
"torch.nn.LeakyReLU",
"torch.ones",
"torch.nn.ReLU",
"torch.cuda.is_available",
"torch.nn.BatchNorm1d",
"torch.nn.BCELoss"
] | 0.4.0 | EmmaNguyen/feature_adversarial_with_topology_signatures | efa7db6d0fdf5b2505d67d4341dcdb2ab05a97a7 |
1.0 | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Run inference for pre-processed data with a trained model.
"""
import logging
import math
import os
import sys
import editdistance
import numpy as np
import torch
from fairseq import checkpoint_utils, options, progress_bar, tasks, utils
from fairseq.data.data_utils import post_process
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.logging.meters import StopwatchMeter, TimeMeter
logging.basicConfig()
logging.root.setLevel(logging.INFO)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def add_asr_eval_argument(parser):
parser.add_argument("--kspmodel", default=None, help="sentence piece model")
parser.add_argument(
"--wfstlm", default=None, help="wfstlm on dictonary output units"
)
parser.add_argument(
"--rnnt_decoding_type",
default="greedy",
help="wfstlm on dictonary\
output units",
)
try:
parser.add_argument(
"--lm-weight",
type=float,
default=0.2,
help="weight for lm while interpolating with neural score",
)
except:
pass
parser.add_argument(
"--len-penalty", type=float, default=1.0, help="length penalty on word level"
)
parser.add_argument(
"--w2l-decoder",
# choices=["viterbi", "kenlm", "fairseqlm"],
help="use a w2l decoder",
)
parser.add_argument("--lexicon", help="lexicon for w2l decoder")
parser.add_argument("--unit-lm", action="store_true", help="if using a unit lm")
parser.add_argument("--kenlm-model", "--lm-model", help="lm model for w2l decoder")
parser.add_argument("--beam-threshold", type=float, default=25.0)
parser.add_argument("--beam-size-token", type=float, default=100)
parser.add_argument("--word-score", type=float, default=1.0)
parser.add_argument("--unk-weight", type=float, default=-math.inf)
parser.add_argument("--sil-weight", type=float, default=0.0)
parser.add_argument(
"--dump-emissions",
type=str,
default=None,
help="if present, dumps emissions into this file and exits",
)
parser.add_argument(
"--dump-features",
type=str,
default=None,
help="if present, dumps features into this file and exits",
)
parser.add_argument(
"--load-emissions",
type=str,
default=None,
help="if present, loads emissions from this file",
)
parser.add_argument(
"--eval-temperature",
type=float,
default=1.0,
help="temperature scaling of the logits",
)
parser.add_argument(
"--eval-upsample",
type=float,
default=1.0,
help="upsample factor",
)
return parser
def check_args(args):
# assert args.path is not None, "--path required for generation!"
# assert args.results_path is not None, "--results_path required for generation!"
assert (
not args.sampling or args.nbest == args.beam
), "--sampling requires --nbest to be equal to --beam"
assert (
args.replace_unk is None or args.raw_text
), "--replace-unk requires a raw text dataset (--raw-text)"
def get_dataset_itr(args, task, models):
return task.get_batch_iterator(
dataset=task.dataset(args.gen_subset),
max_tokens=args.max_tokens,
max_sentences=args.batch_size,
max_positions=(sys.maxsize, sys.maxsize),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=args.required_batch_size_multiple,
num_shards=args.num_shards,
shard_id=args.shard_id,
num_workers=args.num_workers,
data_buffer_size=args.data_buffer_size,
).next_epoch_itr(shuffle=False)
def process_predictions(
args, hypos, sp, tgt_dict, target_tokens, res_files, speaker, id
):
for hypo in hypos[: min(len(hypos), args.nbest)]:
hyp_pieces = tgt_dict.string(hypo["tokens"].int().cpu())
if "words" in hypo:
hyp_words = " ".join(hypo["words"])
else:
hyp_words = post_process(hyp_pieces, args.post_process)
if res_files is not None:
print(
"{} ({}-{})".format(hyp_pieces, speaker, id),
file=res_files["hypo.units"],
)
print(
"{} ({}-{})".format(hyp_words, speaker, id),
file=res_files["hypo.words"],
)
tgt_pieces = tgt_dict.string(target_tokens)
tgt_words = post_process(tgt_pieces, args.post_process)
if res_files is not None:
print(
"{} ({}-{})".format(tgt_pieces, speaker, id),
file=res_files["ref.units"],
)
print(
"{} ({}-{})".format(tgt_words, speaker, id), file=res_files["ref.words"]
)
# only score top hypothesis
if not args.quiet:
logger.debug("HYPO:" + hyp_words)
logger.debug("TARGET:" + tgt_words)
logger.debug("___________________")
hyp_words = hyp_words.split()
tgt_words = tgt_words.split()
return editdistance.eval(hyp_words, tgt_words), len(tgt_words)
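# Added note: the (edit-distance, reference-length) pair returned above is
# accumulated in main() into the corpus-level WER, errs_t * 100.0 / lengths_t.
# Hypothetical hand example:
#   hyp = "the cat sat".split();  ref = "the cat sat down".split()
#   editdistance.eval(hyp, ref) == 1, len(ref) == 4  ->  WER = 25.0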
def prepare_result_files(args):
def get_res_file(file_prefix):
if args.num_shards > 1:
file_prefix = f"{args.shard_id}_{file_prefix}"
path = os.path.join(
args.results_path,
"{}-{}-{}.txt".format(
file_prefix, os.path.basename(args.path), args.gen_subset
),
)
return open(path, "w", buffering=1)
if not args.results_path:
return None
return {
"hypo.words": get_res_file("hypo.word"),
"hypo.units": get_res_file("hypo.units"),
"ref.words": get_res_file("ref.word"),
"ref.units": get_res_file("ref.units"),
}
def load_models_and_criterions(
filenames, data_path, arg_overrides=None, task=None, model_state=None
):
models = []
criterions = []
if arg_overrides is None:
arg_overrides = {}
arg_overrides["wer_args"] = None
arg_overrides["data"] = data_path
if filenames is None:
assert model_state is not None
filenames = [0]
else:
filenames = filenames.split(":")
for filename in filenames:
if model_state is None:
if not os.path.exists(filename):
raise IOError("Model file not found: {}".format(filename))
state = checkpoint_utils.load_checkpoint_to_cpu(filename, arg_overrides)
else:
state = model_state
if "cfg" in state:
cfg = state["cfg"]
else:
cfg = convert_namespace_to_omegaconf(state["args"])
if task is None:
if hasattr(cfg.task, "data"):
cfg.task.data = data_path
task = tasks.setup_task(cfg.task)
model = task.build_model(cfg.model)
model.load_state_dict(state["model"], strict=True)
models.append(model)
criterion = task.build_criterion(cfg.criterion)
if state.get("criterion", None) is not None:
criterion.load_state_dict(state["criterion"], strict=True)
criterions.append(criterion)
return models, criterions, task
def optimize_models(args, use_cuda, models):
"""Optimize ensemble for generation"""
for model in models:
model.make_generation_fast_(
beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
need_attn=args.print_alignment,
)
if args.fp16:
model.half()
if use_cuda:
model.cuda()
class ExistingEmissionsDecoder(object):
def __init__(self, decoder, emissions):
self.decoder = decoder
self.emissions = emissions
def generate(self, models, sample, **unused):
ids = sample["id"].cpu().numpy()
try:
emissions = np.stack(self.emissions[ids])
except:
print([x.shape for x in self.emissions[ids]])
raise Exception("invalid sizes")
emissions = torch.from_numpy(emissions)
return self.decoder.decode(emissions)
def get_num_param(model):
return sum([p.numel() for p in model.parameters()])
def main(args, task=None, model_state=None):
check_args(args)
if args.max_tokens is None and args.batch_size is None:
args.max_tokens = 4000000
logger.info(args)
use_cuda = torch.cuda.is_available() and not args.cpu
logger.info("| decoding with criterion {}".format(args.criterion))
# Load ensemble
if args.load_emissions:
models, criterions = [], []
task = tasks.setup_task(args)
else:
# task = tasks.setup_task(args)
logger.info("| loading model(s) from {}".format(args.path))
models, criterions, task = load_models_and_criterions(
args.path,
data_path=args.data,
arg_overrides=eval(args.model_overrides), # noqa
# task=task,
model_state=model_state,
)
optimize_models(args, use_cuda, models)
for i, model in enumerate(models):
logger.info(f"| model {i} size: {get_num_param(model)}")
for name, m in model.named_children():
logger.info(f"| | model {i} {name} size: {get_num_param(m)}")
for name2, m2 in m.named_children():
logger.info(
f"| | | model {i} {name}.{name2} size: {get_num_param(m2)}"
)
for name3, m3 in m2.named_children():
logger.info(
f"| | | | model {i} {name}.{name2}.{name3} size: {get_num_param(m3)}"
)
# Load dataset splits
task.load_dataset(args.gen_subset)
# Set dictionary
tgt_dict = task.target_dictionary
logger.info(
"| {} {} {} examples".format(
args.data, args.gen_subset, len(task.dataset(args.gen_subset))
)
)
# hack to pass transitions to W2lDecoder
if args.criterion == "asg_loss":
trans = criterions[0].asg.trans.data
args.asg_transitions = torch.flatten(trans).tolist()
# Load dataset (possibly sharded)
itr = get_dataset_itr(args, task, models)
# Initialize generator
gen_timer = StopwatchMeter()
def build_generator(args):
w2l_decoder = getattr(args, "w2l_decoder", None)
if w2l_decoder == "viterbi":
# from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder
from slue_toolkit.fairseq_addon.decoder.w2l_decoder_old import (
W2lViterbiDecoder,
)
return W2lViterbiDecoder(args, task.target_dictionary)
elif w2l_decoder == "kenlm":
# from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder
from slue_toolkit.fairseq_addon.decoder.w2l_decoder_old import (
W2lKenLMDecoder,
)
return W2lKenLMDecoder(args, task.target_dictionary)
elif w2l_decoder == "fairseqlm":
from slue_toolkit.fairseq_addon.decoder.w2l_decoder_old import (
W2lFairseqLMDecoder,
)
return W2lFairseqLMDecoder(args, task.target_dictionary)
elif w2l_decoder == "argmax":
from slue_toolkit.fairseq_addon.decoder.ctc_decoder import CTCArgMaxDecoder
return CTCArgMaxDecoder(args, task.target_dictionary)
elif w2l_decoder == "s2s":
from fairseq.dataclass.configs import GenerationConfig
gen_cfg = GenerationConfig(beam=args.beam)
if args.kenlm_model:
overrides = dict()
lms, _ = checkpoint_utils.load_model_ensemble(
[args.kenlm_model], arg_overrides=overrides, task=None
)
lms[0].eval()
optimize_models(args, use_cuda, lms)
extra_gen_cls_kwargs = {"lm_model": lms[0], "lm_weight": args.lm_weight}
generator = task.build_generator(
[model], gen_cfg, extra_gen_cls_kwargs=extra_gen_cls_kwargs
)
print(f"lm model: {generator.lm_model}")
else:
generator = task.build_generator([model], gen_cfg)
return generator
else:
print(
"only wav2letter decoders with (viterbi, kenlm, fairseqlm) options are supported at the moment"
)
# please do not touch this unless you test both generate.py and infer.py with audio_pretraining task
generator = build_generator(args)
if args.load_emissions:
generator = ExistingEmissionsDecoder(
generator, np.load(args.load_emissions, allow_pickle=True)
)
logger.info("loaded emissions from " + args.load_emissions)
num_sentences = 0
if args.results_path is not None and not os.path.exists(args.results_path):
os.makedirs(args.results_path)
max_source_pos = (
utils.resolve_max_positions(
task.max_positions(), *[model.max_positions() for model in models]
),
)
if max_source_pos is not None:
max_source_pos = max_source_pos[0]
if max_source_pos is not None:
max_source_pos = max_source_pos[0] - 1
if args.dump_emissions:
emissions = {}
if args.dump_features:
features = {}
models[0].bert.proj = None
else:
res_files = prepare_result_files(args)
errs_t = 0
lengths_t = 0
with progress_bar.build_progress_bar(args, itr) as t:
wps_meter = TimeMeter()
full_timer = StopwatchMeter()
full_timer.start()
for sample in t:
sample = utils.move_to_cuda(sample) if use_cuda else sample
def apply_half(t):
if t.dtype is torch.float32:
return t.half()
return t
if args.fp16:
sample = utils.apply_to_sample(apply_half, sample)
if "net_input" not in sample:
continue
prefix_tokens = None
if args.prefix_size > 0:
prefix_tokens = sample["target"][:, : args.prefix_size]
gen_timer.start()
if args.dump_emissions:
with torch.no_grad():
encoder_out = models[0](**sample["net_input"])
emm = models[0].get_normalized_probs(encoder_out, log_probs=True)
emm = emm.transpose(0, 1).cpu().numpy()
for i, id in enumerate(sample["id"]):
emissions[id.item()] = emm[i]
continue
elif args.dump_features:
with torch.no_grad():
encoder_out = models[0](**sample["net_input"])
feat = encoder_out["encoder_out"].transpose(0, 1).cpu().numpy()
for i, id in enumerate(sample["id"]):
padding = (
encoder_out["encoder_padding_mask"][i].cpu().numpy()
if encoder_out["encoder_padding_mask"] is not None
else None
)
features[id.item()] = (feat[i], padding)
continue
hypos = task.inference_step(generator, models, sample, prefix_tokens)
num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos)
gen_timer.stop(num_generated_tokens)
for i, sample_id in enumerate(sample["id"].tolist()):
speaker = None
# id = task.dataset(args.gen_subset).ids[int(sample_id)]
id = sample_id
toks = (
sample["target"][i, :]
if "target_label" not in sample
else sample["target_label"][i, :]
)
target_tokens = utils.strip_pad(toks, tgt_dict.pad()).int().cpu()
# Process top predictions
errs, length = process_predictions(
args,
hypos[i],
None,
tgt_dict,
target_tokens,
res_files,
speaker,
id,
)
errs_t += errs
lengths_t += length
wps_meter.update(num_generated_tokens)
t.log({"wps": round(wps_meter.avg)})
num_sentences += (
sample["nsentences"] if "nsentences" in sample else sample["id"].numel()
)
full_timer.stop()
wer = None
if args.dump_emissions:
emm_arr = []
for i in range(len(emissions)):
emm_arr.append(emissions[i])
np.save(args.dump_emissions, np.array(emm_arr, dtype="object"))
logger.info(f"saved {len(emissions)} emissions to {args.dump_emissions}")
elif args.dump_features:
feat_arr = []
for i in range(len(features)):
feat_arr.append(features[i])
np.save(args.dump_features, np.array(feat_arr, dtype="object"))
logger.info(f"saved {len(features)} emissions to {args.dump_features}")
else:
if lengths_t > 0:
wer = errs_t * 100.0 / lengths_t
logger.info(f"WER: {wer}")
logger.info(f"full time used: {full_timer.sum}")
logger.info(f"time used: {gen_timer.sum}")
logger.info(
"| Processed {} sentences ({} tokens) in {:.2f} s ({:.2f}"
" sentences/s, {:.2f} tokens/s)".format(
num_sentences,
gen_timer.n,
gen_timer.sum,
num_sentences / gen_timer.sum,
1.0 / gen_timer.avg,
)
)
logger.info("| Generate {} with beam={}".format(args.gen_subset, args.beam))
# write the WER info into file in case it's called by infer_multiprocess
    if args.results_path is not None and os.path.exists(args.results_path):
with open(os.path.join(args.results_path, "wer"), "w") as fw_wer:
fw_wer.write(f"{errs_t} {lengths_t}")
return task, wer
def make_parser():
parser = options.get_generation_parser()
parser = add_asr_eval_argument(parser)
return parser
def cli_main():
parser = make_parser()
args = options.parse_args_and_arch(parser)
main(args)
if __name__ == "__main__":
cli_main()
| [
"torch.no_grad",
"torch.from_numpy",
"torch.cuda.is_available",
"torch.flatten"
] | 1.0.0 | siddhu001/slue-toolkit | b8a62ef941a812ce277cf6a4af08d6065af8bec6 |
1.7 | """This is reservoir sampling, each sample has storage-probability 'buffer samples M / seen samples'
"""
import torch
import torch.nn as nn
import torch.optim as optim
import random
class Net(nn.Module):
def __init__(self,
n_inputs,
n_outputs,
n_tasks,
args):
super(Net, self).__init__()
self.net = args.net
self.ce = nn.CrossEntropyLoss()
self.n_outputs = n_outputs
self.opt = optim.SGD(self.parameters(), args.lr)
self.n_memories = args.n_memories
self.n_sampled_memories = args.n_sampled_memories
self.n_constraints = args.n_constraints
self.gpu = args.cuda
self.batch_size = args.batch_size
self.n_iter = args.n_iter
# allocate ring buffer
self.x_buffer = []
self.y_buffer = []
# allocate counters
self.observed_tasks = []
self.old_task = -1
self.seen_cnt = 0
def forward(self, x, t=0):
output = self.net(x)
return output
def observe(self, x, t, y):
""" Train. """
# Update ring buffer storing examples from current task
bsz = y.data.size(0)
# Step over new batch and batch from memory
for iter_i in range(self.n_iter):
self.zero_grad()
x_init = x.clone()
y_init = y.clone()
if self.gpu:
x_init = x_init.cuda()
y_init = y_init.cuda()
sample_bs = bsz
if self.n_memories > 0 and len(self.x_buffer) > 0: # Sample
perm = torch.randperm(len(self.x_buffer))
idx = perm[:sample_bs]
x_s = torch.stack(self.x_buffer)[idx]
y_s = torch.stack(self.y_buffer)[idx]
x_s, y_s = (x_s.cuda(), y_s.cuda()) if self.gpu else (x_s.cpu(), y_s.cpu())
x_ext = torch.cat([x_init, x_s])
y_ext = torch.cat([y_init, y_s])
else:
x_ext = x_init
y_ext = y_init
loss = self.ce(self.forward(x_ext), y_ext)
loss.backward()
self.opt.step()
# Update buffers
for i in range(bsz):
if self.seen_cnt < self.n_memories:
self.x_buffer.append(x[i])
self.y_buffer.append(y[i])
else:
j = random.randrange(self.seen_cnt)
if j < self.n_memories:
self.x_buffer[j] = x[i]
self.y_buffer[j] = y[i]
self.seen_cnt += 1
assert len(self.x_buffer) <= self.n_memories
assert len(self.x_buffer) == len(self.y_buffer)
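# Added note on the buffer update above (a sketch, not original code): the first
# n_memories samples fill the buffer directly; afterwards each new sample
# overwrites a random slot with probability n_memories / seen_cnt, keeping the
# buffer an (approximately) uniform sample of everything observed so far:
#   j = random.randrange(seen)   # seen = number of samples observed so far
#   if j < M:                    # M = buffer capacity
#       buffer[j] = new_sample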
def get_hyperparam_list(self, args):
return []
| [
"torch.cat",
"torch.stack",
"torch.nn.CrossEntropyLoss"
] | 1.7.0 | kreimanlab/AugMem | cb0e8d39eb0c469da46c7c550c19229927a2bec5 |
1.2 | import os
import gc
import time
import torch
import random
import argparse
import numpy as np
import pandas as pd
from glob import glob
from tqdm import tqdm
from trains import *
from config import *
from utils.log import *
from utils.metricsTop import *
from utils.functions import *
from models.AMIO import AMIO
from trains.ATIO import ATIO
from data.load_data import FERDataLoader
from sklearn.model_selection import KFold
def do_test(args, model, dataloader, mode='test'):
model.eval()
y_true, y_pred = [], []
metrics = MetricsTop(args).getMetrics(args.metricsName)
features = []
with torch.no_grad():
for batch_data in tqdm(dataloader[mode]):
data = batch_data['data'].to(args.device)
labels = batch_data['labels'].to(args.device)
emotions = batch_data['emotions']
# model
output = model(data)
features.append(output['fer_feature'].cpu().numpy())
y_true.append(labels.cpu().numpy())
features = np.concatenate(features, axis=0)
labels = np.concatenate(y_true, axis=0)
return features, labels
def run(args):
if not os.path.exists(args.res_save_path):
os.mkdir(args.res_save_path)
# get dst dataset params
config = Config(args)
args = config.get_config()
# train_df0 = pd.read_csv(os.path.join(args.label_dir,'train.csv'))
# kf = KFold(10,shuffle = False)
# for knum, indexs in enumerate(kf.split(train_df0)):
# # print(indexs)
# args.train_df = train_df0.iloc[indexs[0],:]
# args.test_df = train_df0.iloc[indexs[1],:]
# break
args.train_df = pd.read_csv(os.path.join(args.label_dir,'train.csv'))
args.test_df = pd.read_csv(os.path.join(args.label_dir,'test.csv'))
# get dataloader
dataloader = FERDataLoader(args)
# gpu
using_cuda = len(args.gpu_ids) > 0 and torch.cuda.is_available()
device = torch.device('cuda:%d' % args.gpu_ids[0] if using_cuda else 'cpu')
args.device = device
# build model
model = AMIO(args).to(device)
atio = ATIO().getTrain(args)
# load best model
model_save_pathes = glob(os.path.join(args.model_save_path,\
f'{args.modelName}-{args.datasetName}.pth'))
assert len(model_save_pathes) == 1
model.load_state_dict(torch.load(model_save_pathes[0]))
# do test
mode = 'test'
features, labels = do_test(args, model, dataloader, mode=mode)
save_path = os.path.join(args.res_save_path, f'{args.modelName}-{args.datasetName}-{mode}.npz')
np.savez(save_path, features=features, labels=labels)
mode = 'train'
features, labels = do_test(args, model, dataloader, mode=mode)
save_path = os.path.join(args.res_save_path, f'{args.modelName}-{args.datasetName}-{mode}.npz')
np.savez(save_path, features=features, labels=labels)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--tf_mode', type=bool, default=False,
help='is transfer test ?')
parser.add_argument('--val_mode', type=bool, default=False,
help='10 folds cross validation ?')
parser.add_argument('--modelName', type=str, default='FER_DCNN',
help='support FER_DCNN/Our')
parser.add_argument('--datasetName', type=str, default='RAF',
help='support RAF/SFEW2/CK+/OULU_CASIA')
parser.add_argument('--num_workers', type=int, default=8,
help='num workers of loading data')
parser.add_argument('--model_save_path', type=str, default='results/bestModels',
help='path to save model.')
parser.add_argument('--res_save_path', type=str, default='results/Features',
help='path to save results.')
parser.add_argument('--gpu_ids', type=list, default=[0],
help='indicates the gpus will be used.')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
# args.seeds = [1, 12, 123, 1234, 12345]
run(args) | [
"torch.device",
"torch.no_grad",
"torch.cuda.is_available",
"torch.load"
] | 1.2.0 | thuiar/cmcnn | a18f09fa63baf74bb083779fa0a8881d55226e1a |
1.6 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from collections import Sequence
from unittest import mock
import pytest
import torch
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data.dataset import Dataset, IterableDataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import Sampler, SequentialSampler
from pytorch_lightning import Trainer
from pytorch_lightning.trainer.supporters import (
_nested_calc_num_data,
CombinedDataset,
CombinedLoader,
CombinedLoaderIterator,
CycleIterator,
TensorRunningAccum,
)
from pytorch_lightning.utilities.apply_func import apply_to_collection
from pytorch_lightning.utilities.exceptions import MisconfigurationException
def test_tensor_running_accum_reset():
"""Test that reset would set all attributes to the initialization state."""
window_length = 10
accum = TensorRunningAccum(window_length=window_length)
assert accum.last() is None
assert accum.mean() is None
accum.append(torch.tensor(1.5))
assert accum.last() == torch.tensor(1.5)
assert accum.mean() == torch.tensor(1.5)
accum.reset()
assert accum.window_length == window_length
assert accum.memory is None
assert accum.current_idx == 0
assert accum.last_idx is None
assert not accum.rotated
def test_cycle_iterator():
"""Test the cycling function of `CycleIterator`"""
iterator = CycleIterator(range(100), 1000)
assert len(iterator) == 1000
for idx, item in enumerate(iterator):
assert item < 100
assert idx == len(iterator) - 1
def test_none_length_cycle_iterator():
"""Test the infinite cycling function of `CycleIterator`"""
iterator = CycleIterator(range(100))
assert iterator.__len__() == float("inf")
# test infinite loop
for idx, item in enumerate(iterator):
if idx == 1000:
break
assert item == 0
@pytest.mark.parametrize(
["dataset_1", "dataset_2"],
[
([list(range(10)), list(range(20))]),
([range(10), range(20)]),
([torch.randn(10, 3, 2), torch.randn(20, 5, 6)]),
([TensorDataset(torch.randn(10, 3, 2)), TensorDataset(torch.randn(20, 5, 6))]),
],
)
def test_combined_dataset(dataset_1, dataset_2):
"""Verify the length of the CombinedDataset."""
datasets = [dataset_1, dataset_2]
combined_dataset = CombinedDataset(datasets)
assert combined_dataset.max_len == 20
assert combined_dataset.min_len == len(combined_dataset) == 10
def test_combined_dataset_length_mode_error():
dset = CombinedDataset([range(10)])
with pytest.raises(MisconfigurationException, match="Invalid Mode"):
dset._calc_num_data([range(10)], "test")
def test_combined_loader_iterator_dict_min_size():
"""Test `CombinedLoaderIterator` given mapping loaders."""
loaders = {
"a": torch.utils.data.DataLoader(range(10), batch_size=4),
"b": torch.utils.data.DataLoader(range(20), batch_size=5),
}
combined_iter = CombinedLoaderIterator(loaders)
for idx, item in enumerate(combined_iter):
assert isinstance(item, dict)
assert len(item) == 2
assert "a" in item and "b" in item
assert idx == min(len(loaders["a"]), len(loaders["b"])) - 1
def test_combined_loader_init_mode_error():
"""Test the ValueError when constructing `CombinedLoader`"""
with pytest.raises(MisconfigurationException, match="Invalid Mode"):
CombinedLoader([range(10)], "testtt")
def test_combined_loader_loader_type_error():
"""Test the ValueError when wrapping the loaders."""
with pytest.raises(TypeError, match="Expected data to be int, Sequence or Mapping, but got NoneType"):
CombinedLoader(None, "max_size_cycle")
def test_combined_loader_calc_length_mode_error():
"""Test the ValueError when calculating the number of batches."""
with pytest.raises(TypeError, match="Expected data to be int, Sequence or Mapping, but got NoneType"):
CombinedLoader._calc_num_batches(None)
def test_combined_loader_dict_min_size():
"""Test `CombinedLoader` of mode 'min_size' given mapping loaders."""
loaders = {
"a": torch.utils.data.DataLoader(range(10), batch_size=4),
"b": torch.utils.data.DataLoader(range(20), batch_size=5),
}
combined_loader = CombinedLoader(loaders, "min_size")
assert len(combined_loader) == min(len(v) for v in loaders.values())
for idx, item in enumerate(combined_loader):
assert isinstance(item, dict)
assert len(item) == 2
assert "a" in item and "b" in item
assert idx == len(combined_loader) - 1
def test_combined_loader_dict_max_size_cycle():
"""Test `CombinedLoader` of mode 'max_size_cycle' given mapping loaders."""
loaders = {
"a": torch.utils.data.DataLoader(range(10), batch_size=4),
"b": torch.utils.data.DataLoader(range(20), batch_size=5),
}
combined_loader = CombinedLoader(loaders, "max_size_cycle")
assert len(combined_loader) == max(len(v) for v in loaders.values())
for idx, item in enumerate(combined_loader):
assert isinstance(item, dict)
assert len(item) == 2
assert "a" in item and "b" in item
assert idx == len(combined_loader) - 1
def test_combined_loader_sequence_min_size():
"""Test `CombinedLoader` of mode 'min_size' given sequence loaders."""
loaders = [
torch.utils.data.DataLoader(range(10), batch_size=4),
torch.utils.data.DataLoader(range(20), batch_size=5),
]
combined_loader = CombinedLoader(loaders, "min_size")
assert len(combined_loader) == min(len(v) for v in loaders)
for idx, item in enumerate(combined_loader):
assert isinstance(item, Sequence)
assert len(item) == 2
assert idx == len(combined_loader) - 1
class TestIterableDataset(IterableDataset):
def __init__(self, size: int = 10):
self.size = size
def __iter__(self):
self.sampler = SequentialSampler(range(self.size))
self.sampler_iter = iter(self.sampler)
return self
def __next__(self):
return next(self.sampler_iter)
@pytest.mark.parametrize("mode", ["min_size", "max_size_cycle"])
@pytest.mark.parametrize("use_multiple_dataloaders", [False, True])
def test_combined_loader_sequence_iterable_dataset(mode, use_multiple_dataloaders):
"""Test `CombinedLoader` of mode 'min_size' given sequence loaders."""
if use_multiple_dataloaders:
loaders = [
torch.utils.data.DataLoader(TestIterableDataset(10), batch_size=2),
torch.utils.data.DataLoader(TestIterableDataset(20), batch_size=2),
]
else:
loaders = [
torch.utils.data.DataLoader(TestIterableDataset(10), batch_size=2),
]
combined_loader = CombinedLoader(loaders, mode)
has_break = False
for idx, item in enumerate(combined_loader):
assert isinstance(item, Sequence)
assert len(item) == 2 if use_multiple_dataloaders else 1
if not use_multiple_dataloaders and idx == 4:
has_break = True
break
if mode == "max_size_cycle":
assert combined_loader.loaders[0].state.done == (not has_break)
expected = (10 if mode == "max_size_cycle" else 5) if use_multiple_dataloaders else 5
assert (expected - 1) == idx, (mode, use_multiple_dataloaders)
@pytest.mark.parametrize("lengths", [[4, 6], [5, 5], [6, 4]])
def test_combined_loader_sequence_with_map_and_iterable(lengths):
class MyIterableDataset(IterableDataset):
def __init__(self, size: int = 10):
self.size = size
def __iter__(self):
self.sampler = SequentialSampler(range(self.size))
self.iter_sampler = iter(self.sampler)
return self
def __next__(self):
return next(self.iter_sampler)
class MyMapDataset(Dataset):
def __init__(self, size: int = 10):
self.size = size
def __getitem__(self, index):
return index
def __len__(self):
return self.size
x, y = lengths
loaders = [DataLoader(MyIterableDataset(x)), DataLoader(MyMapDataset(y))]
dataloader = CombinedLoader(loaders, mode="max_size_cycle")
counter = 0
for _ in dataloader:
counter += 1
assert counter == max(x, y)
def test_combined_loader_sequence_max_size_cycle():
"""Test `CombinedLoader` of mode 'max_size_cycle' given sequence loaders."""
loaders = [
torch.utils.data.DataLoader(range(10), batch_size=4),
torch.utils.data.DataLoader(range(20), batch_size=5),
]
combined_loader = CombinedLoader(loaders, "max_size_cycle")
assert len(combined_loader) == max(len(v) for v in loaders)
for idx, item in enumerate(combined_loader):
assert isinstance(item, Sequence)
assert len(item) == 2
assert idx == len(combined_loader) - 1
@pytest.mark.parametrize(
["input_data", "compute_func", "expected_length"],
[
([*range(10), list(range(1, 20))], min, 0),
([*range(10), list(range(1, 20))], max, 19),
([*range(10), {str(i): i for i in range(1, 20)}], min, 0),
([*range(10), {str(i): i for i in range(1, 20)}], max, 19),
({**{str(i): i for i in range(10)}, "nested": {str(i): i for i in range(1, 20)}}, min, 0),
({**{str(i): i for i in range(10)}, "nested": {str(i): i for i in range(1, 20)}}, max, 19),
({**{str(i): i for i in range(10)}, "nested": list(range(20))}, min, 0),
({**{str(i): i for i in range(10)}, "nested": list(range(20))}, max, 19),
],
)
def test_nested_calc_num_data(input_data, compute_func, expected_length):
calculated_length = _nested_calc_num_data(input_data, compute_func)
assert calculated_length == expected_length
@mock.patch.dict(os.environ, {"CUDA_VISIBLE_DEVICES": "0,1", "PL_TRAINER_GPUS": "2"})
@mock.patch("torch.cuda.device_count", return_value=2)
@mock.patch("torch.cuda.is_available", return_value=True)
def test_combined_data_loader_validation_test(cuda_available_mock, device_count_mock, tmpdir):
"""This test makes sure distributed sampler has been properly injected in dataloaders when using
CombinedLoader."""
class CustomDataset(Dataset):
def __init__(self, data):
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index]
dataloader = CombinedLoader(
{
"a": DataLoader(CustomDataset(range(10))),
"b": {"c": DataLoader(CustomDataset(range(10))), "d": DataLoader(CustomDataset(range(10)))},
"e": [DataLoader(CustomDataset(range(10))), DataLoader(CustomDataset(range(10)))],
}
)
trainer = Trainer(replace_sampler_ddp=True, accelerator="ddp", gpus=2)
dataloader = trainer.auto_add_sampler(dataloader, shuffle=True)
_count = 0
def _assert_distributed_sampler(v):
nonlocal _count
_count += 1
assert isinstance(v, DistributedSampler)
apply_to_collection(dataloader.sampler, Sampler, _assert_distributed_sampler)
assert _count == 5
| [
"torch.tensor",
"torch.randn"
] | 1.6 | Benjamin-Etheredge/pytorch-lightning | fe572c5911abfa2cc0b806b1c2cfe977d483c7c1 |
1.10 | import csv
import pickle
import os
import logging
from tqdm import tqdm, trange
from torch.utils.data import TensorDataset
import torch.nn.functional as F
import numpy as np
import torch
from collections import OrderedDict
from transformers.utils.dummy_tokenizers_objects import BertTokenizerFast
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
# is this how an intra-package import is supposed to work?
import json
import re
from transformers import AutoTokenizer
keyword_files = ["keyword_train.txt", "keyword_dev.txt", "keyword_test.txt"]
def tokenize(text, tokenizer):
# BERT-style tokenization that keeps the reserved tokens
# [unused0] .. [unused9] as single, un-split units
D = [f"[unused{i}]" for i in range(10)]
textraw = [text]
for delimiter in D:
ntextraw = []
for i in range(len(textraw)):
t = textraw[i].split(delimiter)
for j in range(len(t)):
ntextraw += [t[j]]
if j != len(t)-1:
ntextraw += [delimiter]
textraw = ntextraw
text = []
for t in textraw:
if t in D:
text += [t]
else:
tokens = tokenizer.tokenize(t, add_special_tokens=False)
for tok in tokens:
text += [tok]
for idx, t in enumerate(text):
if idx + 3 < len(text) and t == "[" and text[idx+1] == "[UNK]" and text[idx+2] == "]":
text = text[:idx] + ["[MASK]"] + text[idx+3:]
return text
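# Added illustration (assuming a standard uncased BERT tokenizer; not part of
# the original module): tokenize() keeps reserved markers intact and WordPiece-
# tokenizes everything else, e.g.
#   tokenize("[unused1] : hello there", tokenizer)
#   -> ["[unused1]", ":", "hello", "there"]
# and the trailing loop rewrites a literal "[", "[UNK]", "]" span into "[MASK]".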
n_class = 1
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None, text_c=None, entity=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.text_c = text_c
self.label = label
self.entity = entity
class InputExampleSST2(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None, text_c=None, entity=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeaturesSST2(object):
"""A single set of features of data."""
def __init__(self, input_ids, attention_mask, token_type_ids, label_id):
self.input_ids = input_ids
self.attention_mask = attention_mask
self.token_type_ids = token_type_ids
self.label_id = label_id
class InputExampleWiki80(object):
"""A single training/test example for span pair classification."""
def __init__(self, guid, sentence, span1, span2, ner1, ner2, label):
self.guid = guid
self.sentence = sentence
self.span1 = span1
self.span2 = span2
self.ner1 = ner1
self.ner2 = ner2
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id, entity=None):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.entity = entity
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
class Sst2Processor(DataProcessor):
"""Processor for the SST-2 data set (GLUE version)."""
def __init__(self, data_dir, a):
super().__init__()
self.data_dir = data_dir
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["sentence"].numpy().decode("utf-8"),
None,
str(tensor_dict["label"].numpy()),
)
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training, dev and test sets."""
examples = []
text_index = 0
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[text_index]
label = line[1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, text_c=None, label=label))
return examples
class relossProcessor(DataProcessor): #bert_s
def __init__(self, data_path="data", use_prompt=False):
def is_speaker(a):
a = a.split()
return len(a) == 2 and a[0] == "speaker" and a[1].isdigit()
# replace the speaker with [unused] token
def rename(d, x, y):
d = d.replace("’","'")
d = d.replace("im","i")
d = d.replace("...",".")
unused = ["[unused1]", "[unused2]"]
a = []
if is_speaker(x):
a += [x]
else:
a += [None]
if x != y and is_speaker(y):
a += [y]
else:
a += [None]
for i in range(len(a)):
if a[i] is None:
continue
d = d.replace(a[i] + ":", unused[i] + " :")
if x == a[i]:
x = unused[i]
if y == a[i]:
y = unused[i]
return d, x, y
self.D = [[], [], []]
for sid in range(3):
# iterate over the three splits (train / dev / test)
with open(data_path + "/"+["train.json", "dev.json", "test.json"][sid], "r", encoding="utf8") as f:
data = json.load(f)
for i in range(len(data)):
for j in range(len(data[i][1])):
rid = []
for k in range(36):
if k+1 in data[i][1][j]["rid"]:
rid += [1]
else:
rid += [0]
d, h, t = rename(' '.join(data[i][0]).lower(), data[i][1][j]["x"].lower(), data[i][1][j]["y"].lower())
prompt = f"what is the relation between {h} and {t} ? {t} is the [MASK] {h} ."
d = [
prompt + d,
h,
t,
rid,
t
]
self.D[sid] += [d]
logger.info(str(len(self.D[0])) + "," + str(len(self.D[1])) + "," + str(len(self.D[2])))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self.D[0], "train")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self.D[2], "test")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self.D[1], "dev")
def get_labels(self):
"""See base class."""
return [str(x) for x in range(36)]
def _create_examples(self, data, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, d) in enumerate(data):
guid = "%s-%s" % (set_type, i)
examples.append(InputExample(guid=guid, text_a=data[i][0], text_b=data[i][1], label=data[i][3], text_c=data[i][2], entity=data[i][4]))
return examples
class bertProcessor(DataProcessor): #bert_s
def __init__(self, data_path="data", use_prompt=False):
def is_speaker(a):
a = a.split()
return len(a) == 2 and a[0] == "speaker" and a[1].isdigit()
# replace the speaker with [unused] token
def rename(d, x, y):
d = d.replace("’","'")
d = d.replace("im","i")
d = d.replace("...",".")
unused = ["[unused1]", "[unused2]"]
a = []
if is_speaker(x):
a += [x]
else:
a += [None]
if x != y and is_speaker(y):
a += [y]
else:
a += [None]
for i in range(len(a)):
if a[i] is None:
continue
d = d.replace(a[i] + ":", unused[i] + " :")
if x == a[i]:
x = unused[i]
if y == a[i]:
y = unused[i]
return d, x, y
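# Added illustration (hypothetical dialogue snippet): rename() maps the speaker
# mentions of the two arguments onto reserved tokens, e.g.
#   rename("speaker 1: hi . speaker 2: hello .", "speaker 1", "speaker 2")
#   -> ("[unused1] : hi . [unused2] : hello .", "[unused1]", "[unused2]")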
self.D = [[], [], []]
for sid in range(3):
# iterate over the three splits (train / dev / test)
with open(data_path + "/"+["train.json", "dev.json", "test.json"][sid], "r", encoding="utf8") as f:
data = json.load(f)
sample_idx = 0
for i in range(len(data)):
for j in range(len(data[i][1])):
rid = []
for k in range(36):
if k+1 in data[i][1][j]["rid"]:
rid += [1]
else:
rid += [0]
d, h, t = rename(' '.join(data[i][0]).lower(), data[i][1][j]["x"].lower(), data[i][1][j]["y"].lower())
if use_prompt:
prompt = f"{h} is the [MASK] {t} ."
else:
prompt = f"what is the relation between {h} and {t} ?"
sample_idx += 1
d = [
prompt + d,
h,
t,
rid,
]
self.D[sid] += [d]
logger.info(str(len(self.D[0])) + "," + str(len(self.D[1])) + "," + str(len(self.D[2])))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self.D[0], "train")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self.D[2], "test")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self.D[1], "dev")
def get_labels(self):
"""See base class."""
return [str(x) for x in range(36)]
def _create_examples(self, data, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, d) in enumerate(data):
guid = "%s-%s" % (set_type, i)
examples.append(InputExample(guid=guid, text_a=data[i][0], text_b=data[i][1], label=data[i][3], text_c=data[i][2]))
return examples
class ptuneProcessor(DataProcessor): #bert_s
def __init__(self, data_path="data", use_prompt=False, ptune_k=6):
def is_speaker(a):
a = a.split()
return len(a) == 2 and a[0] == "speaker" and a[1].isdigit()
# replace the speaker with [unused] token
def rename(d, x, y):
d = d.replace("’","'")
d = d.replace("im","i")
d = d.replace("...",".")
unused = ["[unused1]", "[unused2]"]
a = []
if is_speaker(x):
a += [x]
else:
a += [None]
if x != y and is_speaker(y):
a += [y]
else:
a += [None]
for i in range(len(a)):
if a[i] is None:
continue
d = d.replace(a[i] + ":", unused[i] + " :")
if x == a[i]:
x = unused[i]
if y == a[i]:
y = unused[i]
return d, x, y
self.D = [[], [], []]
"""
TODO: add new samples. For every sample, if a trigger word is present, mask the trigger and replace the original [MASK] with the correct label token;
if the sentence contains no trigger, randomly mask a word in the sentence and replace the original [MASK] with the correct label token.
"""
for sid in range(3):
# iterate over the three splits (train / dev / test)
with open(data_path + "/"+["train.json", "dev.json", "test.json"][sid], "r", encoding="utf8") as f:
data = json.load(f)
sample_idx = 0
for i in range(len(data)):
for j in range(len(data[i][1])):
rid = []
for k in range(36):
if k+1 in data[i][1][j]["rid"]:
rid += [1]
else:
rid += [0]
d, h, t = rename(' '.join(data[i][0]).lower(), data[i][1][j]["x"].lower(), data[i][1][j]["y"].lower())
unused_word = " ".join([f"[unused{i}]" for i in range(3, ptune_k+3)])
# entity start markers are [unused3]/[unused4]; end markers are [unused5]/[unused6]
st = [f"[unused{i}]" for i in range(3,5)]
ed = [f"[unused{i}]" for i in range(5,7)]
# [unused7], [unused8], [unused9] serve as soft prompt tokens around [MASK]
prompt = f"[sub] {st[0]} {h} {st[1]} [sub] [unused7] [unused8] [MASK] [unused9] [obj] {ed[0]} {t} {ed[1]} [obj]."
# for temp_i in range(10):
# d = d.replace(f"speaker {temp_i}:", f"[speaker{temp_i}]")
sample_idx += 1
sample = [
prompt + d,
h,
t,
rid,
]
self.D[sid] += [sample]
# multi labels, add more data in the training set
if i == 0:
for idx,trigger in enumerate(data[i][1][j]['t']):
if trigger != "":
label_token = f"[class{data[i][1][j]['rid'][idx]+1}]"
prompt = prompt.replace("[MASK]", label_token)
# assume for now that the model should predict the trigger word at the newly masked position, ...
d = d.replace(trigger, "[MASK]", 1)
sample = [
prompt + d,
h,
t,
rid,
]
self.D[sid] += [sample]
logger.info(str(len(self.D[0])) + "," + str(len(self.D[1])) + "," + str(len(self.D[2])))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self.D[0], "train")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self.D[2], "test")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self.D[1], "dev")
def get_labels(self):
"""See base class."""
return [str(x) for x in range(36)]
def _create_examples(self, data, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, d) in enumerate(data):
guid = "%s-%s" % (set_type, i)
examples.append(InputExample(guid=guid, text_a=data[i][0], text_b=data[i][1], label=data[i][3], text_c=data[i][2]))
return examples
class wiki80Processor(DataProcessor):
"""Processor for the TACRED data set."""
def __init__(self, data_path, use_prompt):
super().__init__()
self.data_dir = data_path
@classmethod
def _read_json(cls, input_file):
data = []
with open(input_file, "r", encoding='utf-8') as reader:
all_lines = reader.readlines()
for line in all_lines:
ins = eval(line)
data.append(ins)
return data
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_json(os.path.join(data_dir, "train.txt")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_json(os.path.join(data_dir, "val.txt")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_json(os.path.join(data_dir, "test.txt")), "test")
def get_labels(self, negative_label="no_relation"):
data_dir = self.data_dir
"""See base class."""
# if 'k-shot' in self.data_dir:
# data_dir = os.path.abspath(os.path.join(self.data_dir, "../.."))
# else:
# data_dir = self.data_dir
with open(os.path.join(data_dir,'rel2id.json'), "r", encoding='utf-8') as reader:
re2id = json.load(reader)
return re2id
def _create_examples(self, dataset, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for example in dataset:
sentence = example['token']
examples.append(InputExampleWiki80(guid=None,
sentence=sentence,
# note: the end positions are used as-is (no -1), i.e. treated as exclusive indices; possibly off by one
span1=(example['h']['pos'][0], example['h']['pos'][1]),
span2=(example['t']['pos'][0], example['t']['pos'][1]),
ner1=None,
ner2=None,
label=example['relation']))
return examples
def convert_examples_to_features_for_loss(examples, max_seq_length, tokenizer):
print("#examples", len(examples))
features = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenize(example.text_a, tokenizer)
tokens_b = tokenize(example.text_b, tokenizer)
tokens_c = tokenize(example.text_c, tokenizer)
# t_tokens = tokenize(example.entity, tokenizer)
t_tokens = tokenizer(example.entity, add_special_tokens=False)["input_ids"]
_truncate_seq_tuple(tokens_a, tokens_b, tokens_c, max_seq_length - 4)
tokens_b = tokens_b + ["[SEP]"] + tokens_c
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = example.label
len_t = len(t_tokens)
normal_input_ids = input_ids[:]
for idx, input_id in enumerate(input_ids):
if idx + len_t < len(input_ids) and input_ids[idx:idx+len_t] == t_tokens:
# [MASK] id = 103
for j in range(len_t):
input_ids[j+idx] = 103
# append 1 sample with 2 input
features.append(
[InputFeatures(
input_ids=normal_input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
entity = t_tokens
),
InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
entity = t_tokens
)]
)
print('#features', len(features))
return features
def convert_examples_to_features_normal(examples, max_seq_length, tokenizer):
print("#examples", len(examples))
features = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenize(example.text_a, tokenizer)
tokens_b = tokenize(example.text_b, tokenizer)
tokens_c = tokenize(example.text_c, tokenizer)
_truncate_seq_tuple(tokens_a, tokens_b, tokens_c, max_seq_length - 4)
tokens_b = tokens_b + ["[SEP]"] + tokens_c
inputs = tokenizer(
example.text_a,
example.text_b + tokenizer.sep_token + example.text_c,
truncation="longest_first",
max_length=max_seq_length,
padding="max_length",
add_special_tokens=True
)
# tokens = []
# segment_ids = []
# tokens.append("[CLS]")
# segment_ids.append(0)
# for token in tokens_a:
# tokens.append(token)
# segment_ids.append(0)
# tokens.append("[SEP]")
# segment_ids.append(0)
# for token in tokens_b:
# tokens.append(token)
# segment_ids.append(1)
# tokens.append("[SEP]")
# segment_ids.append(1)
# input_ids = tokenizer.convert_tokens_to_ids(tokens)
# # The mask has 1 for real tokens and 0 for padding tokens. Only real
# # tokens are attended to.
# input_mask = [1] * len(input_ids)
# # Zero-pad up to the sequence length.
# while len(input_ids) < max_seq_length:
# input_ids.append(0)
# input_mask.append(0)
# segment_ids.append(0)
# assert(inputs['input_ids'] == input_ids), print(inputs['input_ids'])
# assert len(input_ids) == max_seq_length
# assert len(input_mask) == max_seq_length
# assert len(segment_ids) == max_seq_length
label_id = example.label
if ex_index == 0:
logger.info(f"input_text : {tokens_a} {tokens_b} {tokens_c}")
logger.info(f"input_ids : {inputs['input_ids']}")
logger.info(f"token_type_ids : {inputs['token_type_ids']}")
# inputs = {}
# inputs['input_ids'] = input_ids
# inputs['attention_mask'] = input_mask
# inputs['token_type_ids'] = segment_ids
# append 1 sample with 2 input
features.append(
InputFeatures(
input_ids=inputs['input_ids'],
input_mask=inputs['attention_mask'],
segment_ids=inputs['token_type_ids'],
label_id=label_id,
)
)
print('#features', len(features))
return features
def convert_examples_to_features(examples, max_seq_length, tokenizer, args, rel2id):
"""Loads a data file into a list of `InputBatch`s."""
save_file = "data/cached_wiki80.pkl"
mode = "text"
num_tokens = 0
num_fit_examples = 0
num_shown_examples = 0
instances = []
use_bert = "BertTokenizer" in tokenizer.__class__.__name__
use_gpt = "GPT" in tokenizer.__class__.__name__
assert not (use_bert and use_gpt), "model cannot be gpt and bert together"
if False:
with open(file=save_file, mode='rb') as fr:
instances = pickle.load(fr)
print('load preprocessed data from {}.'.format(save_file))
else:
print('loading..')
for (ex_index, example) in enumerate(examples):
"""
the relation between SUBJECT and OBJECT is .
"""
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(examples)))
tokens = []
SUBJECT_START = "[subject_start]"
SUBJECT_END = "[subject_end]"
OBJECT_START = "[object_start]"
OBJECT_END = "[object_end]"
if mode.startswith("text"):
for i, token in enumerate(example.sentence):
if i == example.span1[0]:
tokens.append(SUBJECT_START)
if i == example.span2[0]:
tokens.append(OBJECT_START)
# for sub_token in tokenizer.tokenize(token):
# tokens.append(sub_token)
if i == example.span1[1]:
tokens.append(SUBJECT_END)
if i == example.span2[1]:
tokens.append(OBJECT_END)
tokens.append(token)
SUBJECT = " ".join(example.sentence[example.span1[0]: example.span1[1]])
OBJECT = " ".join(example.sentence[example.span2[0]: example.span2[1]])
SUBJECT_ids = tokenizer(" "+SUBJECT, add_special_tokens=False)['input_ids']
OBJECT_ids = tokenizer(" "+OBJECT, add_special_tokens=False)['input_ids']
if use_gpt:
if args.CT_CL:
prompt = f"[T1] [T2] [T3] [sub] {OBJECT} [sub] [T4] [obj] {SUBJECT} [obj] [T5] {tokenizer.cls_token}"
else:
prompt = f"The relation between [sub] {SUBJECT} [sub] and [obj] {OBJECT} [obj] is {tokenizer.cls_token} ."
else:
# add prompt [T_n] and entity marker [obj] to enrich the context.
prompt = f"[sub] {SUBJECT} [sub] {tokenizer.mask_token} [obj] {OBJECT} [obj] ."
if ex_index == 0:
input_text = " ".join(tokens)
logger.info(f"input text : {input_text}")
logger.info(f"prompt : {prompt}")
logger.info(f"label : {example.label}")
inputs = tokenizer(
prompt,
" ".join(tokens),
truncation="longest_first",
max_length=max_seq_length,
padding="max_length",
add_special_tokens=True
)
if use_gpt: cls_token_location = inputs['input_ids'].index(tokenizer.cls_token_id)
# find the subject and object tokens, choose the first ones
sub_st = sub_ed = obj_st = obj_ed = -1
for i in range(len(inputs['input_ids'])):
if sub_st == -1 and inputs['input_ids'][i:i+len(SUBJECT_ids)] == SUBJECT_ids:
sub_st = i
sub_ed = i + len(SUBJECT_ids)
if obj_st == -1 and inputs['input_ids'][i:i+len(OBJECT_ids)] == OBJECT_ids:
obj_st = i
obj_ed = i + len(OBJECT_ids)
assert sub_st != -1 and obj_st != -1
num_tokens += sum(inputs['attention_mask'])
if sum(inputs['attention_mask']) > max_seq_length:
pass
# tokens = tokens[:max_seq_length]
else:
num_fit_examples += 1
x = OrderedDict()
x['input_ids'] = inputs['input_ids']
if use_bert: x['token_type_ids'] = inputs['token_type_ids']
x['attention_mask'] = inputs['attention_mask']
x['label'] = rel2id[example.label]
if use_gpt: x['cls_token_location'] = cls_token_location
x['so'] =[sub_st, sub_ed, obj_st, obj_ed]
instances.append(x)
with open(file=save_file, mode='wb') as fw:
pickle.dump(instances, fw)
print('Finish save preprocessed data to {}.'.format( save_file))
input_ids = [o['input_ids'] for o in instances]
attention_mask = [o['attention_mask'] for o in instances]
if use_bert: token_type_ids = [o['token_type_ids'] for o in instances]
if use_gpt: cls_idx = [o['cls_token_location'] for o in instances]
labels = [o['label'] for o in instances]
so = torch.tensor([o['so'] for o in instances])
input_ids = torch.tensor(input_ids)
attention_mask = torch.tensor(attention_mask)
if use_gpt: cls_idx = torch.tensor(cls_idx)
if use_bert: token_type_ids = torch.tensor(token_type_ids)
labels = torch.tensor(labels)
logger.info("Average #tokens: %.2f" % (num_tokens * 1.0 / len(examples)))
logger.info("%d (%.2f %%) examples can fit max_seq_length = %d" % (num_fit_examples,
num_fit_examples * 100.0 / len(examples), max_seq_length))
if use_gpt:
dataset = TensorDataset(input_ids, attention_mask, cls_idx, labels)
elif use_bert:
dataset = TensorDataset(input_ids, attention_mask, token_type_ids, labels, so)
else:
dataset = TensorDataset(input_ids, attention_mask, labels)
return dataset
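# Illustrative sketch (not from the original pipeline): how the entity markers and the
# mask-style prompt used in convert_examples_to_features() are assembled for one toy
# example. The sentence, spans, and the literal "[MASK]" stand-in for
# tokenizer.mask_token are assumptions made for illustration only.
def _demo_marker_prompt():
    sentence = ["Bill", "Gates", "founded", "Microsoft", "."]
    span1, span2 = (0, 2), (3, 4)  # subject = "Bill Gates", object = "Microsoft"
    tokens = []
    for i, token in enumerate(sentence):
        if i == span1[0]:
            tokens.append("[subject_start]")
        if i == span2[0]:
            tokens.append("[object_start]")
        if i == span1[1]:
            tokens.append("[subject_end]")
        if i == span2[1]:
            tokens.append("[object_end]")
        tokens.append(token)
    subject = " ".join(sentence[span1[0]:span1[1]])
    object_ = " ".join(sentence[span2[0]:span2[1]])
    # mirrors the non-GPT prompt template above; "[MASK]" replaces tokenizer.mask_token here
    prompt = f"[sub] {subject} [sub] [MASK] [obj] {object_} [obj] ."
    return " ".join(tokens), prompt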
def convert_examples_to_feature_sst2(examples, max_seq_length, tokenizer, args, rel2id):
"""Loads a data file into a list of `InputBatch`s."""
save_file = "data/cached_wiki80.pkl"
mode = "text"
num_tokens = 0
num_fit_examples = 0
num_shown_examples = 0
instances = []
if False:
with open(file=save_file, mode='rb') as fr:
instances = pickle.load(fr)
print('load preprocessed data from {}.'.format(save_file))
else:
print('loading..')
for (ex_index, example) in enumerate(examples):
try:
prompt = f"[T1] [T2] {tokenizer.mask_token} ."
inputs = tokenizer(
example.text_a + prompt,
truncation="longest_first",
max_length=max_seq_length,
padding="max_length",
add_special_tokens=True
)
x = OrderedDict()
x['input_ids'] = inputs['input_ids']
x['attention_mask'] = inputs['attention_mask']
if "roberta" not in args.model_name_or_path:
x['token_type_ids'] = inputs['token_type_ids']
x['label'] = int(example.label)
instances.append(x)
except Exception as e:
print(e)
with open(file=save_file, mode='wb') as fw:
pickle.dump(instances, fw)
        print('Finished saving preprocessed data to {}.'.format(save_file))
input_ids = [o['input_ids'] for o in instances]
attention_mask = [o['attention_mask'] for o in instances]
if "roberta" not in args.model_name_or_path:
token_type_ids = [o['token_type_ids'] for o in instances]
token_type_ids = torch.tensor(token_type_ids)
labels = [o['label'] for o in instances]
input_ids = torch.tensor(input_ids)
attention_mask = torch.tensor(attention_mask)
labels = torch.tensor(labels)
logger.info("Average #tokens: %.2f" % (num_tokens * 1.0 / len(examples)))
logger.info("%d (%.2f %%) examples can fit max_seq_length = %d" % (num_fit_examples,
num_fit_examples * 100.0 / len(examples), max_seq_length))
if "roberta" not in args.model_name_or_path:
dataset = TensorDataset(input_ids, attention_mask, token_type_ids, labels)
else:
dataset = TensorDataset(input_ids, attention_mask, labels)
return dataset
def _truncate_seq_tuple(tokens_a, tokens_b, tokens_c, max_length):
"""Truncates a sequence tuple in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b) + len(tokens_c)
if total_length <= max_length:
break
if len(tokens_a) >= len(tokens_b) and len(tokens_a) >= len(tokens_c):
tokens_a.pop()
elif len(tokens_b) >= len(tokens_a) and len(tokens_b) >= len(tokens_c):
tokens_b.pop()
else:
tokens_c.pop()
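# Illustrative sketch (not from the original pipeline): the heuristic above always pops
# one token from whichever of the three lists is currently longest until the combined
# length fits. The toy token lists below are assumptions for demonstration only.
def _demo_truncate_seq_tuple():
    a, b, c = ["a"] * 6, ["b"] * 3, ["c"] * 2
    _truncate_seq_tuple(a, b, c, max_length=8)
    # only the longest list is trimmed: 6 + 3 + 2 = 11 tokens shrink to 3 + 3 + 2 = 8
    assert (len(a), len(b), len(c)) == (3, 3, 2)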
def get_dataset(mode, args, tokenizer, processor):
if mode == "train":
examples = processor.get_train_examples(args.data_dir)
elif mode == "dev":
examples = processor.get_dev_examples(args.data_dir)
elif mode == "test":
examples = processor.get_test_examples(args.data_dir)
else:
        raise Exception("mode must be one of [train, dev, test]")
gpt_mode = "wiki80" in args.task_name
if "wiki80" in args.task_name:
# normal relation extraction task
dataset = convert_examples_to_features(
examples, args.max_seq_length, tokenizer, args, processor.get_labels()
)
return dataset
elif "sst" in args.task_name:
dataset = convert_examples_to_feature_sst2(
examples, args.max_seq_length, tokenizer, args, None
)
return dataset
else:
train_features = convert_examples_to_features_normal(
examples, args.max_seq_length, tokenizer
)
input_ids = []
input_mask = []
segment_ids = []
label_id = []
entity_id = []
for f in train_features:
input_ids.append(f.input_ids)
input_mask.append(f.input_mask)
segment_ids.append(f.segment_ids)
label_id.append(f.label_id)
all_input_ids = torch.tensor(input_ids, dtype=torch.long)
all_input_mask = torch.tensor(input_mask, dtype=torch.long)
all_segment_ids = torch.tensor(segment_ids, dtype=torch.long)
all_label_ids = torch.tensor(label_id, dtype=torch.float)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
return train_data
def collate_fn(batch):
pass
processors = {
    "normal": bertProcessor,
    "reloss": relossProcessor,
    "ptune": ptuneProcessor,
    "wiki80": wiki80Processor,
    "sst-2": Sst2Processor,
}
"torch.tensor",
"torch.utils.data.TensorDataset"
] | 1.10 | johncolezhang/DeepKE | ea4552ec42cb003a835f00fc14fb454f9a9a7183 |
1.8 | import torch
import torch.nn as nn
from torch.nn.utils.rnn import PackedSequence
from src.utils.mapper import configmapper
def hotfix_pack_padded_sequence(
input, lengths, batch_first=False, enforce_sorted=False
):
lengths = torch.as_tensor(lengths, dtype=torch.int64)
lengths = lengths.cpu()
if enforce_sorted:
sorted_indices = None
else:
lengths, sorted_indices = torch.sort(lengths, descending=True)
sorted_indices = sorted_indices.to(input.device)
batch_dim = 0 if batch_first else 1
input = input.index_select(batch_dim, sorted_indices)
data, batch_sizes = torch._C._VariableFunctions._pack_padded_sequence(
input, lengths, batch_first
)
return PackedSequence(data, batch_sizes, sorted_indices)
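# Illustrative sketch (not from the original file): a minimal usage of
# hotfix_pack_padded_sequence() on a zero-padded batch. The helper sorts the batch by
# length itself, so the caller does not have to; the shapes below are made-up values.
def _demo_pack():
    batch = torch.zeros(3, 5, 8)          # (batch, max_seq_len, features), zero-padded
    lengths = torch.tensor([5, 2, 4])     # true lengths of the three sequences
    packed = hotfix_pack_padded_sequence(batch, lengths, batch_first=True)
    out, (h, c) = nn.LSTM(8, 16, batch_first=True)(packed)
    return h.shape                        # (num_layers * num_directions, 3, 16) == (1, 3, 16)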
# class LSTM(nn.Module):
# def __init__(
# self,
# input_size,
# hidden_size,
# batch_first=False,
# num_layers=1,
# bidirectional=False,
# dropout=0.2,
# ):
# super(LSTM, self).__init__()
# self.rnn = nn.LSTM(
# input_size=input_size,
# hidden_size=hidden_size,
# num_layers=num_layers,
# bidirectional=bidirectional,
# batch_first=batch_first,
# )
# self.reset_params()
# self.dropout = nn.Dropout(p=dropout)
# def reset_params(self):
# for i in range(self.rnn.num_layers):
# nn.init.orthogonal_(getattr(self.rnn, f"weight_hh_l{i}"))
# nn.init.kaiming_normal_(getattr(self.rnn, f"weight_ih_l{i}"))
# nn.init.constant_(getattr(self.rnn, f"bias_hh_l{i}"), val=0)
# nn.init.constant_(getattr(self.rnn, f"bias_ih_l{i}"), val=0)
# bias = getattr(self.rnn, f"bias_hh_l{i}").detach()
# bias.chunk(4)[1].fill_(1)
# with torch.no_grad():
# setattr(self.rnn, f"bias_hh_l{i}", nn.Parameter(bias))
# if self.rnn.bidirectional:
# nn.init.orthogonal_(getattr(self.rnn, f"weight_hh_l{i}_reverse"))
# nn.init.kaiming_normal_(getattr(self.rnn, f"weight_ih_l{i}_reverse"))
# nn.init.constant_(getattr(self.rnn, f"bias_hh_l{i}_reverse"), val=0)
# nn.init.constant_(getattr(self.rnn, f"bias_ih_l{i}_reverse"), val=0)
# bias = getattr(self.rnn, f"bias_hh_l{i}_reverse").detach()
# bias.chunk(4)[1].fill_(1)
# with torch.no_grad():
# setattr(self.rnn, f"bias_hh_l{i}_reverse", nn.Parameter(bias))
# def forward(self, x, x_len):
# # x: [batch_size, seq_len, dim], x_len:[batch_size]
# x_len_sorted, x_idx = torch.sort(x_len, descending=True)
# x_sorted = torch.index_select(x, dim=0, index=x_idx)
# sorted_x, x_ori_idx = torch.sort(x_idx)
# # x_packed = nn.utils.rnn.pack_padded_sequence(
# # x_sorted, x_len_sorted, batch_first=True
# # )
# x_packed = hotfix_pack_padded_sequence(x_sorted, x_len_sorted, batch_first=True)
# x_packed, (hidden, c) = self.rnn(x_packed)
# x = nn.utils.rnn.pad_packed_sequence(x_packed, batch_first=True)[0]
# x = x.index_select(dim=0, index=x_ori_idx)
# hidden = torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1)
# # hidden = hidden.permute(1, 0, 2).contiguous().view(-1,
# # hidden.size(0) * hidden.size(2)).squeeze()
# hidden = hidden.index_select(dim=0, index=x_ori_idx)
# return hidden, x
@configmapper.map("models", "lstm_model")
class LstmModel(nn.Module):
def __init__(
self,
vocab_size,
embedding_dim,
hidden_size,
lstm_num_layers,
bidirectional,
dropout,
num_labels,
):
super().__init__()
self.embedding = nn.Embedding(
vocab_size, embedding_dim, padding_idx=0
) # from_pretrained
self.lstm = nn.LSTM(
embedding_dim,
hidden_size,
lstm_num_layers,
batch_first=True,
bidirectional=bidirectional,
dropout=dropout,
)
self.hidden_size = hidden_size
self.lstm_num_layers = lstm_num_layers
self.dropout = nn.Dropout(dropout)
self.mult = 2 if bidirectional else 1
self.linear = nn.Linear(hidden_size * self.mult, num_labels)
self.num_labels = num_labels
def forward(self, inputs, lengths, labels=None):
x = self.embedding(inputs)
x = self.dropout(x)
x_pack = hotfix_pack_padded_sequence(x, lengths, batch_first=True)
out_pack, (ht, ct) = self.lstm(x_pack)
        ht = ht.view(self.lstm_num_layers, self.mult, -1, self.hidden_size)
        if self.mult == 2:
            # bidirectional: concatenate the last layer's forward and backward hidden states
            hidden = torch.cat([ht[-1, 0, :, :], ht[-1, 1, :, :]], dim=-1)
        else:
            hidden = ht[-1, 0, :, :]
        logits = self.linear(self.dropout(hidden))
if labels is not None:
loss_fct = nn.CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return loss, logits
return logits
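# Illustrative sketch (not from the original file): a minimal forward pass through
# LstmModel on random token ids. Vocabulary size, dimensions, lengths, and labels are
# assumptions chosen only to show the expected input/output shapes.
def _demo_lstm_model():
    model = LstmModel(vocab_size=100, embedding_dim=32, hidden_size=64,
                      lstm_num_layers=1, bidirectional=True, dropout=0.1, num_labels=2)
    inputs = torch.randint(1, 100, (4, 12))        # (batch, seq_len); 0 is the padding idx
    lengths = torch.tensor([12, 9, 7, 3])
    labels = torch.tensor([0, 1, 1, 0])
    loss, logits = model(inputs, lengths, labels)  # logits: (4, 2)
    return loss, logits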
| [
"torch.nn.utils.rnn.PackedSequence",
"torch.nn.Embedding",
"torch.nn.Dropout",
"torch.nn.Linear",
"torch.nn.LSTM",
"torch.cat",
"torch.nn.CrossEntropyLoss",
"torch._C._VariableFunctions._pack_padded_sequence",
"torch.as_tensor",
"torch.sort"
] | 1.8.1 | gchhablani/financial-sentiment-analysis | b18e9072f8edb9f09d0fef697892f2462d6d44e9 |
1.5 | import tqdm
import json
import torch
import random
import argparse
from orderedset import OrderedSet
from collections import defaultdict
from outcomes.src.common import init_model
def main():
ap = argparse.ArgumentParser()
ap.add_argument("--device", default="cpu", type=str, help="cpu or number for GPU device")
ap.add_argument("--copa_dir", default="data/copa", type=str, help="COPA data directory")
args = ap.parse_args()
device = torch.device(f"cuda:{args.device}") if args.device != "cpu" else torch.device("cpu")
with open(f"{args.copa_dir}/dev.jsonl") as f_in:
events = [json.loads(line.strip())["premise"] for line in f_in]
out = defaultdict(lambda: defaultdict(list))
lms = [(lm, *init_model(lm, device)) for lm in ["openai-gpt", "gpt2", "gpt2-xl"]]
for event in tqdm.tqdm(random.sample(events, 20)):
for lm, model, tokenizer in lms:
prefix = f"{event} As a result,"
preds_topk = generate(
tokenizer, model, prefix, device, num_return_sequences=10, max_length=10, k=10)
preds_topp = generate(
tokenizer, model, prefix, device, num_return_sequences=10, max_length=10, p=0.9)
preds_beam = generate(
tokenizer, model, prefix, device, num_return_sequences=5, max_length=10, beams=5)
out[event][f"{lm}_preds_top10"] = preds_topk
out[event][f"{lm}_preds_top0.9"] = preds_topp
out[event][f"{lm}_preds_beam5"] = preds_beam
print_latex_table(out)
def generate(tokenizer, model, prompt, device, num_return_sequences=1, max_length=10, beams=0, p=0, k=0):
"""
Generate a sequence with models like GPT, GPT2, or XLNet
"""
context_tokens = tokenizer.encode(prompt)
max_length = max_length + len(context_tokens)
input_ids = torch.tensor(context_tokens, device=device).unsqueeze(0)
eos_token_id = tokenizer.encode(".", add_special_tokens=False)[-1]
outputs = model.generate(
input_ids=input_ids,
do_sample=beams == 0,
max_length=max_length,
# temperature=temperature,
top_p=p if p > 0 else None,
top_k=k if k > 0 else None,
eos_token_id=eos_token_id,
num_beams=beams if beams > 0 else None,
early_stopping=True,
no_repeat_ngram_size=3,
num_return_sequences=num_return_sequences
)
preds = [tokenizer.decode(output, skip_special_tokens=True)[len(prompt):].strip() for output in outputs]
print(preds)
preds = list(OrderedSet([pred.split(".")[0].strip() for pred in preds]))
preds = [t for t in preds if len(t) > 0]
return preds
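# Illustrative sketch (not from the original file): calling generate() with the three
# decoding strategies compared in main(). The premise string is made up, and running
# this requires downloading the pretrained GPT-2 weights via init_model().
def _demo_generate(device=torch.device("cpu")):
    model, tokenizer = init_model("gpt2", device)
    prefix = "She forgot her umbrella. As a result,"
    top_k_preds = generate(tokenizer, model, prefix, device,
                           num_return_sequences=5, max_length=10, k=10)
    top_p_preds = generate(tokenizer, model, prefix, device,
                           num_return_sequences=5, max_length=10, p=0.9)
    beam_preds = generate(tokenizer, model, prefix, device,
                          num_return_sequences=5, max_length=10, beams=5)
    return top_k_preds, top_p_preds, beam_preds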
def print_latex_table(out):
"""
Print the example generated outcomes
"""
examples = [(event, fields)
for event, fields in out.items()
if len(fields) > 0 and
all([len(v) > 0 for v in fields.values()])]
print("""\\begin{tabular}{lllll}""")
print("""\\toprule""")
print("""\\textbf{Event} & \\textbf{LM} & \\textbf{Sampling} & \\textbf{Outcome} \\\\""")
print("\\midrule")
lms = ["openai-gpt", "gpt2", "gpt2-xl"]
for event, fields in examples:
print("\\multirow{9}{*}{\\textbf{" + event + "}} ")
by_lm = {lm: {k.replace(f"{lm}_preds_", ""): v[0] for k, v in fields.items() if lm in k} for lm in lms}
for i, lm in enumerate(lms):
for j, sampling in enumerate(["top10", "top0.9", "beam5"]):
first_col = "\\multirow{3}{*}{" + lm + "} " if j == 0 else ""
print(f"& {first_col} & {sampling} & {by_lm[lm][sampling]} \\\\")
if sampling == "beam5":
print("\\midrule")
if i == 2:
print("\\midrule")
print("\\midrule")
print("""\\bottomrule""")
print("""\end{tabular}""")
if __name__ == '__main__':
main()
| [
"torch.device",
"torch.tensor"
] | 1.5.1 | vered1986/reporting_bias_lms | f4e3a26f41db30939c899855b413bad1ebe14d21 |
1.4 | import os, sys
import numpy as np
import imageio
from opt import get_opts
import torch
from collections import defaultdict
from torch.utils.data import DataLoader
from datasets import dataset_dict
# models
from models.nerf import Embedding, NeRF
from models.rendering import render_rays
# optimizer, scheduler, visualization
from utils import *
# losses
from losses import loss_dict
# metrics
from metrics import *
# pytorch-lightning
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.logging import TestTubeLogger
to8b = lambda x : (255*np.clip(x,0,1)).astype(np.uint8)
class NeRFSystem(LightningModule):
def __init__(self, hparams):
super(NeRFSystem, self).__init__()
self.hparams = hparams
self.loss = loss_dict[hparams.loss_type]()
self.embedding_xyz = Embedding(3, 10) # 10 is the default number
self.embedding_dir = Embedding(3, 4) # 4 is the default number
self.embeddings = [self.embedding_xyz, self.embedding_dir]
self.nerf_coarse = NeRF()
self.models = [self.nerf_coarse]
if hparams.N_importance > 0:
self.nerf_fine = NeRF()
self.models += [self.nerf_fine]
def decode_batch(self, batch):
if self.hparams.dataset_name == 'pyredner':
all_rgb_gt = batch["rgb"] # (num_images, H*W, 3)
all_rgb_gt = all_rgb_gt.reshape(-1, 3)
cam_all_rays = batch["cam_ray_bundle"] # (num_images, H*W, 8)
cam_all_rays = cam_all_rays.reshape(-1, 8)
# light_all_rays = batch["light_ray_bundle"] # (num_images, H*W, 8)?
# light_all_rays = light_all_rays.reshape(-1, 8)
# shadow_maps = batch["shadow_maps"]
# shadow_maps = shadow_maps.reshape(-1, 3)
# shadow_maps = None
return all_rgb_gt, cam_all_rays, None, None
else:
rays = batch['rays'] # (B, 8)
# print("rays.shape",rays.shape)
rgbs = batch['rgbs'] # (B, 3)
# print("rgbs.shape",rgbs.shape)
# print("decode batch", rays.shape, rgbs.shape)
return rays, rgbs
def forward(self, rays):
"""Do batched inference on rays using chunk."""
B = rays.shape[0]
results = defaultdict(list)
for i in range(0, B, self.hparams.chunk):
rendered_ray_chunks = \
render_rays(self.models,
self.embeddings,
rays[i:i+self.hparams.chunk],
self.hparams.N_samples,
self.hparams.use_disp,
self.hparams.perturb,
self.hparams.noise_std,
self.hparams.N_importance,
self.hparams.chunk, # chunk size is effective in val mode
self.train_dataset.white_back)
for k, v in rendered_ray_chunks.items():
results[k] += [v]
for k, v in results.items():
results[k] = torch.cat(v, 0)
return results
def prepare_data(self):
dataset = dataset_dict[self.hparams.dataset_name]
kwargs = {'root_dir': self.hparams.root_dir,
'img_wh': tuple(self.hparams.img_wh),
'hparams': self.hparams
}
if self.hparams.dataset_name == 'llff':
kwargs['spheric_poses'] = self.hparams.spheric_poses
kwargs['val_num'] = self.hparams.num_gpus
self.train_dataset = dataset(split='train', **kwargs)
self.val_dataset = dataset(split='val', **kwargs)
def configure_optimizers(self):
self.optimizer = get_optimizer(self.hparams, self.models)
scheduler = get_scheduler(self.hparams, self.optimizer)
return [self.optimizer], [scheduler]
def train_dataloader(self):
return DataLoader(self.train_dataset,
shuffle=True,
num_workers=4,
batch_size=self.hparams.batch_size,
pin_memory=True)
def val_dataloader(self):
return DataLoader(self.val_dataset,
shuffle=False,
num_workers=4,
batch_size=1, # validate one image (H*W rays) at a time
pin_memory=True)
def training_step(self, batch, batch_nb):
log = {'lr': get_learning_rate(self.optimizer)}
if self.hparams.dataset_name == 'pyredner':
rgbs, cam_all_rays, _, _ = self.decode_batch(batch)
results = self(cam_all_rays)
else:
rays, rgbs = self.decode_batch(batch)
results = self(rays)
log['train/loss'] = loss = self.loss(results, rgbs)
typ = 'fine' if 'rgb_fine' in results else 'coarse'
with torch.no_grad():
psnr_ = psnr(results[f'rgb_{typ}'], rgbs)
log['train/psnr'] = psnr_
return {'loss': loss,
'progress_bar': {'train_psnr': psnr_},
'log': log
}
def validation_step(self, batch, batch_nb):
print("---------------Starting Validation---------------")
if self.hparams.dataset_name == 'pyredner':
rgbs, cam_all_rays, _, _ = self.decode_batch(batch)
rays = cam_all_rays.squeeze() # (H*W, 3)
rgbs = rgbs.squeeze() # (H*W, 3)
results = self(cam_all_rays)
else:
rays, rgbs = self.decode_batch(batch)
rays = rays.squeeze() # (H*W,3)
rgbs = rgbs.squeeze() # (H*W,3)
results = self(rays)
log = {'val_loss': self.loss(results, rgbs)}
typ = 'fine' if 'rgb_fine' in results else 'coarse'
if batch_nb == 0:
print("---------------Evaluating and saving Images!---------------")
W, H = self.hparams.img_wh
img = results[f'rgb_{typ}'].view(H, W, 3).cpu()
rgb8 = to8b(img.numpy())
gt8 = to8b(rgbs.view(H, W, 3).cpu().numpy())
img = img.permute(2, 0, 1) # (3, H, W)
img_gt = rgbs.view(H, W, 3).permute(2, 0, 1).cpu() # (3, H, W)
depth8 = visualize_depth(results[f'depth_{typ}'].view(H, W), to_tensor=False)
depth = visualize_depth(results[f'depth_{typ}'].view(H, W)) # (3, H, W)
if not os.path.exists(f'logs/{self.hparams.exp_name}/imgs'):
os.mkdir(f'logs/{self.hparams.exp_name}/imgs')
filename = os.path.join(f'logs/{self.hparams.exp_name}/imgs', 'gt_{:03d}.png'.format(self.current_epoch))
imageio.imwrite(filename, gt8)
filename = os.path.join(f'logs/{self.hparams.exp_name}/imgs', 'rgb_{:03d}.png'.format(self.current_epoch))
imageio.imwrite(filename, rgb8)
filename = os.path.join(f'logs/{self.hparams.exp_name}/imgs', 'depth_{:03d}.png'.format(self.current_epoch))
imageio.imwrite(filename, depth8)
stack = torch.stack([img_gt, img, depth]) # (3, 3, H, W)
self.logger.experiment.add_images('val/GT_pred_depth',
stack, self.global_step)
log['val_psnr'] = psnr(results[f'rgb_{typ}'], rgbs)
return log
def validation_epoch_end(self, outputs):
mean_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
mean_psnr = torch.stack([x['val_psnr'] for x in outputs]).mean()
return {'progress_bar': {'val_loss': mean_loss,
'val_psnr': mean_psnr},
'log': {'val/loss': mean_loss,
'val/psnr': mean_psnr}
}
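# Illustrative sketch (not from the original file): the chunking pattern used in
# NeRFSystem.forward() in isolation. render_rays() is replaced here by a toy
# per-chunk computation so the sketch stays self-contained; the real code evaluates
# the NeRF models on each chunk and concatenates the per-chunk outputs over rays.
def _demo_chunked_inference(rays, chunk=1024):
    results = defaultdict(list)
    for i in range(0, rays.shape[0], chunk):
        chunk_out = {'rgb_coarse': torch.sigmoid(rays[i:i + chunk, :3])}  # stand-in for render_rays
        for k, v in chunk_out.items():
            results[k] += [v]
    return {k: torch.cat(v, 0) for k, v in results.items()}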
if __name__ == '__main__':
hparams = get_opts()
system = NeRFSystem(hparams)
checkpoint_callback = ModelCheckpoint(filepath=os.path.join(f'ckpts/{hparams.exp_name}',
'{epoch:d}'),
monitor='val/loss',
mode='min',
save_top_k=5,)
logger = TestTubeLogger(
save_dir="logs",
name=hparams.exp_name,
debug=False,
create_git_tag=False
)
trainer = Trainer(max_epochs=hparams.num_epochs,
checkpoint_callback=checkpoint_callback,
resume_from_checkpoint=hparams.ckpt_path,
logger=logger,
early_stop_callback=None,
weights_summary=None,
progress_bar_refresh_rate=1,
gpus=hparams.num_gpus,
                      distributed_backend='ddp' if hparams.num_gpus > 1 else None,
num_sanity_val_steps=hparams.num_sanity_val_steps,
benchmark=True,
profiler=hparams.num_gpus==1,
auto_scale_batch_size=True)
trainer.fit(system) | [
"torch.cat",
"torch.stack",
"torch.utils.data.DataLoader",
"torch.no_grad"
] | 1.4.0 | ktiwary2/nerf_pl | 99d40cba3a2d9a11d6988cb1a74cf29035a1ab5e |