Dataset Viewer
version (stringclasses, 24 values) | code (stringlengths, 396–135k) | apis (sequence) | full_version (stringlengths, 1–6) | repo_name (stringlengths, 6–64) | hexsha (stringlengths, 40) |
---|---|---|---|---|---|
1.8 | import numpy as np
import torch
from discrete_network.network import KNNet, KNNetParameters, KNNetState
from discrete_network.method.force_method import ForceParameters, ForceLearn
from discrete_network.device import device
import matplotlib.pyplot as plt
print(f"Device = {device.type}")
# params_spiking = KNNetParameters(eps = 0.015, beta = 0.0, d = 0.26, a = 0.25, J = 0.1081 + 0.1)
# params_spiking = KNNetParameters(eps = 0.015, beta = 0.03, d = 0.26, a = 0.25, J = 0.1081 + 0.1)
# params_spiking = KNNetParameters(eps = 0.015, beta = 0.05, d = 0.26, a = 0.25, J = 0.15)
# normal spike
# params_spiking = KNNetParameters(eps = 0.02, beta = 0.0, d = 0.26, a = 0.25, J = 0.1081 + 0.1)
# params_spiking = KNNetParameters(eps = 0.03, beta = 0.035, d = 0.26, a = 0.25, J = 0.1081 + 0.1)
def one_neuron(x0, y0, iteration, p: KNNetParameters):
"""The dynamics of one neuron. Return x, y."""
x, y = np.zeros(iteration), np.zeros(iteration)
x[0], y[0] = x0, y0
for i in range(iteration - 1):
x[i + 1] = (
x[i]
+ x[i] * (x[i] - p.a) * (1 - x[i])
- p.beta * (x[i] > p.d)
- y[i]
)
y[i + 1] = y[i] + p.eps * (x[i] - p.J)
return x, y
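# Illustrative single-neuron check (an assumption, not part of the original sweep below):
# plot one trajectory of the map to verify spiking before running the FORCE loop.
# params_demo = KNNetParameters(eps=0.015, beta=0.0, d=0.26, a=0.25, J=0.1081 + 0.1)
# x_demo, y_demo = one_neuron(0.3, 0.0, 2000, params_demo)
# plt.plot(x_demo); plt.xlabel("iteration"); plt.ylabel("x"); plt.show()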
imin = 0; icrit = 20000; nt = 21000
input_size = 0
hidden_size = 2000
output_size = 2
eps_start = 0.01
eps_stop = 0.1
eps = eps_start + (eps_stop - eps_start) * torch.rand(hidden_size, 1).to(device)
a = 0.25
J = (1 + a - torch.sqrt(1 + a * a - a + 3 * eps)) / 3
J = J.to(device)
p = KNNetParameters(
eps=eps, a=torch.as_tensor(a), J=J, q=1.1, g=0.1, x_th=torch.as_tensor(0.65),
beta=torch.as_tensor(0.0)
)
bifparams = []
bifparams_second = []
for i in np.arange(0.03, 0.04, 0.001):
for j in np.arange(0.025, 0.1, 0.002):
params_spiking = KNNetParameters(eps = j, beta = i, d = 0.26, a = 0.25, J = 0.1081 + 0.1)
f_out_x, f_out_y = one_neuron(0.3, 0, nt, params_spiking)
f_out = np.concatenate([[f_out_x], [f_out_y]], 0).T
x_initial = 0.9 * torch.rand(hidden_size, 1).to(device)
y_initial = torch.zeros(hidden_size, 1).to(device)
z_initial = torch.zeros(hidden_size, 1).to(device)
ISPC_initial = torch.zeros(hidden_size, 1).to(device)
initial_state = KNNetState(x=x_initial, y=y_initial, z=z_initial, ISPC=ISPC_initial)
net = KNNet(input_size, hidden_size, output_size, p=p)
net.to_device(device)
lp = ForceParameters(stop_learning=icrit, start_learning=imin)
fl = ForceLearn(net=net, lp=lp, save_states=True)
train_logs, states = fl.train(target_outputs=f_out, state=initial_state)
L2 = torch.linalg.norm(train_logs[-1000:, 0, 0] - f_out[-1000:, 0])
L2_second = torch.linalg.norm(train_logs[-1000:, 1, 0] - f_out[-1000:, 1])
print(torch.log(L2))
bifparams.append([i, j, torch.log(L2).item()])
bifparams_second.append([i, j, torch.log(L2_second).item()])
print(f'1dim: {bifparams[-1]}, 2dim: {bifparams_second[-1]}')
bifparams = np.array(bifparams)
bifparams_second = np.array(bifparams_second)
np.save('./betaeps_3', bifparams)
np.save('./betaeps_second_3', bifparams_second) | [
"torch.rand",
"torch.sqrt",
"torch.zeros",
"torch.linalg.norm",
"torch.as_tensor",
"torch.log"
] | 1.8.2 | aw02m/Spiking_neural_networks | 4c23c50b52b15a9e5709cb672fd18cd22218b9f2 |
1.7 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
created by Halo 2020/10/28 11:28
https://tangshusen.me/Dive-into-DL-PyTorch/#/chapter03_DL-basics/3.12_weight-decay
"""
import torch
import torch.nn as nn
import numpy as np
import mytorch.d2lzh_pytorch as d2l
n_train, n_test, num_inputs = 20, 100, 200
true_w, true_b = torch.ones(num_inputs, 1) * 0.01, 0.05
features = torch.randn((n_train + n_test, num_inputs))
labels = torch.matmul(features, true_w) + true_b
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)
train_features, test_features = features[:n_train, :], features[n_train:, :]
train_labels, test_labels = labels[:n_train], labels[n_train:]
def init_params():
w = torch.randn((num_inputs, 1), requires_grad=True)
b = torch.zeros(1, requires_grad=True)
return [w, b]
def l2_penalty(w):
return (w ** 2).sum() / 2
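# Quick sanity check (illustrative, not in the original tutorial): l2_penalty(w) equals
# 0.5 * ||w||^2, the term scaled by lambd in fit_and_plot below.
# w_demo = torch.ones(3, 1) * 2.0
# print(l2_penalty(w_demo).item())  # 6.0 == 0.5 * ||w_demo||^2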
batch_size, num_epochs, lr = 1, 100, 0.003
net, loss = d2l.linreg, d2l.squared_loss
dataset = torch.utils.data.TensorDataset(train_features, train_labels)
train_iter = torch.utils.data.DataLoader(dataset, batch_size, shuffle=True)
def fit_and_plot(lambd):
w, b = init_params()
train_ls, test_ls = [], []
for _ in range(num_epochs):
for X, y in train_iter:
l = loss(net(X, w, b), y) + lambd * l2_penalty(w)
l = l.sum()
if w.grad is not None:
w.grad.data.zero_()
b.grad.data.zero_()
l.backward()
d2l.sgd([w, b], lr, batch_size)
train_ls.append(loss(net(train_features, w, b), train_labels).mean().item())
test_ls.append(loss(net(test_features, w, b), test_labels).mean().item())
d2l.semilogy(range(1, num_epochs + 1), train_ls, 'epochs', 'loss',
range(1, num_epochs + 1), test_ls, ['train', 'test'])
print('L2 norm of w:', w.norm().item())
# Weight decay can be specified via the optimizer's weight_decay hyperparameter.
def fit_and_plot_pytorch(wd):
net = nn.Linear(num_inputs, 1)
nn.init.normal_(net.weight, mean=0, std=1)
nn.init.normal_(net.bias, mean=0, std=1)
optimizer_w = torch.optim.SGD(params=[net.weight], lr=lr, weight_decay=wd)  # apply weight decay to the weight parameters
optimizer_b = torch.optim.SGD(params=[net.bias], lr=lr)  # no weight decay for the bias parameter
train_ls, test_ls = [], []
for _ in range(num_epochs):
for X, y in train_iter:
l = loss(net(X), y).mean()
optimizer_w.zero_grad()
optimizer_b.zero_grad()
l.backward()
# call step() on both optimizer instances so the weight and the bias are updated separately
optimizer_w.step()
optimizer_b.step()
train_ls.append(loss(net(train_features), train_labels).mean().item())
test_ls.append(loss(net(test_features), test_labels).mean().item())
d2l.semilogy(range(1, num_epochs + 1), train_ls, 'epochs', 'loss',
range(1, num_epochs + 1), test_ls, ['train', 'test'])
print('L2 norm of w:', net.weight.data.norm().item())
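# Equivalent single-optimizer sketch (not from the original tutorial): parameter groups
# let one SGD instance decay the weight but not the bias.
# optimizer = torch.optim.SGD([
#     {'params': net.weight, 'weight_decay': wd},
#     {'params': net.bias, 'weight_decay': 0.0},
# ], lr=lr)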
fit_and_plot(lambd=0)
fit_and_plot(lambd=3)
fit_and_plot_pytorch(0)
fit_and_plot_pytorch(3)
| [
"torch.zeros",
"torch.nn.Linear",
"torch.optim.SGD",
"torch.ones",
"torch.randn",
"torch.nn.init.normal_",
"torch.utils.data.DataLoader",
"torch.matmul",
"torch.utils.data.TensorDataset"
] | 1.7.0 | Halo1236/Dive-into-DL-PyTorch | 586b4e9ca77b2121ce5f5bec8b0a893b33f1b574 |
1.4 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from torch.nn import functional as F
from detectron2.layers import paste_masks_in_image
from detectron2.structures import Instances
def detector_postprocess(results, output_height, output_width, mask_threshold=0.5):
"""
Resize the output instances.
The input images are often resized when entering an object detector.
As a result, we often need the outputs of the detector in a different
resolution from its inputs.
This function will resize the raw outputs of an R-CNN detector
to produce outputs according to the desired output resolution.
Args:
results (Instances): the raw outputs from the detector.
`results.image_size` contains the input image resolution the detector sees.
This object might be modified in-place.
output_height, output_width: the desired output resolution.
Returns:
Instances: the resized output from the model, based on the output resolution
"""
scale_x, scale_y = (output_width / results.image_size[1], output_height / results.image_size[0])
results = Instances((output_height, output_width), **results.get_fields())
if results.has("pred_boxes"):
output_boxes = results.pred_boxes
elif results.has("proposal_boxes"):
output_boxes = results.proposal_boxes
output_boxes.scale(scale_x, scale_y)
output_boxes.clip(results.image_size)
results = results[output_boxes.nonempty()]
if results.has("pred_masks"):
results.pred_masks = paste_masks_in_image(
results.pred_masks[:, 0, :, :], # N, 1, M, M
results.pred_boxes,
results.image_size,
threshold=mask_threshold,
)
if results.has("pred_keypoints"):
results.pred_keypoints[:, :, 0] *= scale_x
results.pred_keypoints[:, :, 1] *= scale_y
return results, output_boxes.nonempty()
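# Illustrative call (variable names are hypothetical, not from this file): rescale raw
# Instances from the detector's input resolution back to the original image size.
# resized_results, keep_mask = detector_postprocess(raw_results, orig_height, orig_width)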
def sem_seg_postprocess(result, img_size, output_height, output_width):
"""
Return semantic segmentation predictions in the original resolution.
The input images are often resized when entering the semantic segmentor. Moreover, in some
cases, they are also padded inside the segmentor to be divisible by the maximum network stride.
As a result, we often need the predictions of the segmentor in a different
resolution from its inputs.
Args:
result (Tensor): semantic segmentation prediction logits. A tensor of shape (C, H, W),
where C is the number of classes, and H, W are the height and width of the prediction.
img_size (tuple): image size that segmentor is taking as input.
output_height, output_width: the desired output resolution.
Returns:
semantic segmentation prediction (Tensor): A tensor of the shape
(C, output_height, output_width) that contains per-pixel soft predictions.
"""
result = result[:, : img_size[0], : img_size[1]].expand(1, -1, -1, -1)
result = F.interpolate(
result, size=(output_height, output_width), mode="bilinear", align_corners=False
)[0]
return result
| [
"torch.nn.functional.interpolate"
] | 1.4.0 | aleSuglia/py-bottom-up-attention | a97142ad3526c11272c471ee7d610494f1247b7b |
1.0 | """Training utilities."""
import os
from typing import Any, Dict, Union
import pytorch_lightning as pl
import torch
from loguru import logger
from pytorch_lightning.callbacks.base import Callback
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from pytorch_lightning.loggers import WandbLogger
from .dataset import (
DATASETS,
EnzymaticReactionDataset,
EnzymaticReactionLightningDataModule,
)
from .model import EnzymaticReactionLightningModule
def get_data_module(
dataset_args: Dict[str, Union[float, str, int]],
) -> EnzymaticReactionLightningDataModule:
"""
Get a data module for enzymatic reactions.
Args:
dataset_args: dictionary containing all the necessary parameters for the dataset creation.
Returns:
data module for enzymatic reactions.
"""
return EnzymaticReactionLightningDataModule(
dataset_args,
DATASETS.get(
str(dataset_args.get("dataset_type", "enzymatic")), EnzymaticReactionDataset
),
)
def train(
model_args: Dict[str, Union[float, str, int]],
model_architecture: Dict[str, Union[float, str, int]],
dataset_args: Dict[str, Union[float, str, int]],
trainer_args: Dict[str, Any],
) -> None:
"""
Train a model.
Args:
model_args: dictionary containing all the parameters for the model configuration.
model_architecture: dictionary containing the information related to the architecture of the model.
dataset_args: dictionary containing all the necessary parameters for the dataset creation.
trainer_args: dictionary containing all the necessary parameters for the training routine.
"""
data_module = get_data_module(dataset_args)
model_architecture["vocab_size"] = data_module.train_dataset.tokenizer.vocab_size
model = EnzymaticReactionLightningModule(model_args, model_architecture)
log_dir = trainer_args["log_dir"]
os.makedirs(log_dir, exist_ok=True)
del trainer_args["log_dir"]
lightning_logger = WandbLogger(
name="mlm-logger", save_dir=log_dir, log_model=True, project="rxn-aa-mapper"
)
trainer_args["logger"] = lightning_logger
if not torch.cuda.is_available():
del trainer_args["gpus"]
if not isinstance(trainer_args["val_check_interval"], int):
trainer_args["val_check_interval"] = 10000
logger.warning(
f"please set trainer['val_check_interval'] to an integer value, defaulting to {trainer_args['val_check_interval']}"
)
if (
"accelerator" not in trainer_args
or trainer_args.get("accelerator", "ddp") == "ddp_spawn"
):
trainer_args["accelerator"] = "ddp"
logger.warning(
f"ddp_spawn not supported because of pickle issues, defaulting to {trainer_args['accelerator']}"
)
# gather the callbacks
trainer_args["callbacks"] = []
if "early_stopping_callback" in trainer_args:
callback: Callback = EarlyStopping(**trainer_args["early_stopping_callback"])
del trainer_args["early_stopping_callback"]
trainer_args["callbacks"].append(callback)
if "model_checkpoint_callback" in trainer_args:
callback = ModelCheckpoint(**trainer_args["model_checkpoint_callback"])
del trainer_args["model_checkpoint_callback"]
trainer_args["callbacks"].append(callback)
trainer = pl.Trainer(**trainer_args)
trainer.fit(model, data_module)
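# Hypothetical invocation (keys and values are illustrative only, not from this repo):
# train(
#     model_args={"learning_rate": 1e-4},
#     model_architecture={"num_layers": 6},
#     dataset_args={"dataset_type": "enzymatic", "batch_size": 8},
#     trainer_args={"log_dir": "./logs", "gpus": 1, "val_check_interval": 10000},
# )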
def checkpoint_to_module(
input_checkpoint: str,
model_args: Dict[str, Union[float, str, int]],
model_architecture: Dict[str, Union[float, str, int]],
) -> EnzymaticReactionLightningModule:
"""
Transform a checkpoint into a module.
Args:
input_checkpoint: model checkpoint.
model_args: dictionary containing all the parameters for the model configuration.
model_architecture: dictionary containing the information related to the architecture of the model.
Returns:
the lightning module.
"""
return EnzymaticReactionLightningModule.load_from_checkpoint(
checkpoint_path=input_checkpoint,
model_args=model_args,
model_architecture=model_architecture,
)
| [
"torch.cuda.is_available"
] | 1.0 | yvesnana/rxnaamapper | 48fb6a6f45f5ec087f99cedbac34eda2a65e14a3 |
1.9 | # *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
# This file has been modified to enable CPU inference!
import torch
from torch.autograd import Variable
import torch.nn.functional as F
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
n_channels_int = n_channels[0]
in_act = input_a + input_b
t_act = torch.tanh(in_act[:, :n_channels_int, :])
s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
acts = t_act * s_act
return acts
class WaveGlowLoss(torch.nn.Module):
def __init__(self, sigma=1.0):
super(WaveGlowLoss, self).__init__()
self.sigma = sigma
def forward(self, model_output):
z, log_s_list, log_det_W_list = model_output
for i, log_s in enumerate(log_s_list):
if i == 0:
log_s_total = torch.sum(log_s)
log_det_W_total = log_det_W_list[i]
else:
log_s_total = log_s_total + torch.sum(log_s)
log_det_W_total += log_det_W_list[i]
loss = torch.sum(z * z) / (2 * self.sigma * self.sigma) - log_s_total - log_det_W_total
return loss / (z.size(0) * z.size(1) * z.size(2))
class Invertible1x1Conv(torch.nn.Module):
"""
The layer outputs both the convolution and the log determinant
of its weight matrix. If reverse=True it does convolution with
the inverse of the weight matrix.
"""
def __init__(self, c):
super(Invertible1x1Conv, self).__init__()
self.conv = torch.nn.Conv1d(c, c, kernel_size=1, stride=1, padding=0, bias=False)
# Sample a random orthonormal matrix to initialize weights
W = torch.qr(torch.FloatTensor(c, c).normal_())[0]
# Ensure determinant is 1.0 not -1.0
if torch.det(W) < 0:
W[:, 0] = -1 * W[:, 0]
W = W.view(c, c, 1)
self.conv.weight.data = W
def forward(self, z, reverse=False):
# shape
batch_size, group_size, n_of_groups = z.size()
W = self.conv.weight.squeeze()
if reverse:
if not hasattr(self, "W_inverse"):
# Reverse computation
W_inverse = W.float().inverse()
W_inverse = Variable(W_inverse[..., None])
if z.type() == "torch.cuda.HalfTensor":
W_inverse = W_inverse.half()
self.W_inverse = W_inverse
z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
return z
else:
# Forward computation
log_det_W = batch_size * n_of_groups * torch.logdet(W)
z = self.conv(z)
return z, log_det_W
class WN(torch.nn.Module):
"""
This is the WaveNet-like layer for the affine coupling. The primary difference
from WaveNet is that the convolutions need not be causal. There is also no dilation
size reset. The dilation only doubles on each layer
"""
def __init__(self, n_in_channels, n_mel_channels, n_layers, n_channels, kernel_size):
super(WN, self).__init__()
assert kernel_size % 2 == 1
assert n_channels % 2 == 0
self.n_layers = n_layers
self.n_channels = n_channels
self.in_layers = torch.nn.ModuleList()
self.res_skip_layers = torch.nn.ModuleList()
start = torch.nn.Conv1d(n_in_channels, n_channels, 1)
start = torch.nn.utils.weight_norm(start, name="weight")
self.start = start
# Initializing last layer to 0 makes the affine coupling layers
# do nothing at first. This helps with training stability
end = torch.nn.Conv1d(n_channels, 2 * n_in_channels, 1)
end.weight.data.zero_()
end.bias.data.zero_()
self.end = end
cond_layer = torch.nn.Conv1d(n_mel_channels, 2 * n_channels * n_layers, 1)
self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")
for i in range(n_layers):
dilation = 2 ** i
padding = int((kernel_size * dilation - dilation) / 2)
in_layer = torch.nn.Conv1d(n_channels, 2 * n_channels, kernel_size, dilation=dilation, padding=padding)
in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
self.in_layers.append(in_layer)
# last one is not necessary
if i < n_layers - 1:
res_skip_channels = 2 * n_channels
else:
res_skip_channels = n_channels
res_skip_layer = torch.nn.Conv1d(n_channels, res_skip_channels, 1)
res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
self.res_skip_layers.append(res_skip_layer)
def forward(self, forward_input):
audio, spect = forward_input
audio = self.start(audio)
output = torch.zeros_like(audio)
n_channels_tensor = torch.IntTensor([self.n_channels])
spect = self.cond_layer(spect)
for i in range(self.n_layers):
spect_offset = i * 2 * self.n_channels
acts = fused_add_tanh_sigmoid_multiply(
self.in_layers[i](audio),
spect[:, spect_offset : spect_offset + 2 * self.n_channels, :],
n_channels_tensor,
)
res_skip_acts = self.res_skip_layers[i](acts)
if i < self.n_layers - 1:
audio = audio + res_skip_acts[:, : self.n_channels, :]
output = output + res_skip_acts[:, self.n_channels :, :]
else:
output = output + res_skip_acts
return self.end(output)
class WaveGlow(torch.nn.Module):
def __init__(self, n_mel_channels, n_flows, n_group, n_early_every, n_early_size, WN_config):
super(WaveGlow, self).__init__()
self.upsample = torch.nn.ConvTranspose1d(n_mel_channels, n_mel_channels, 1024, stride=256)
assert n_group % 2 == 0
self.n_flows = n_flows
self.n_group = n_group
self.n_early_every = n_early_every
self.n_early_size = n_early_size
self.WN = torch.nn.ModuleList()
self.convinv = torch.nn.ModuleList()
n_half = int(n_group / 2)
# Set up layers with the right sizes based on how many dimensions
# have been output already
n_remaining_channels = n_group
for k in range(n_flows):
if k % self.n_early_every == 0 and k > 0:
n_half = n_half - int(self.n_early_size / 2)
n_remaining_channels = n_remaining_channels - self.n_early_size
self.convinv.append(Invertible1x1Conv(n_remaining_channels))
self.WN.append(WN(n_half, n_mel_channels * n_group, **WN_config))
self.n_remaining_channels = n_remaining_channels # Useful during inference
def forward(self, forward_input):
"""
forward_input[0] = mel_spectrogram: batch x n_mel_channels x frames
forward_input[1] = audio: batch x time
"""
spect, audio = forward_input
# Upsample spectrogram to size of audio
spect = self.upsample(spect)
assert spect.size(2) >= audio.size(1)
if spect.size(2) > audio.size(1):
spect = spect[:, :, : audio.size(1)]
spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3)
spect = spect.contiguous().view(spect.size(0), spect.size(1), -1).permute(0, 2, 1)
audio = audio.unfold(1, self.n_group, self.n_group).permute(0, 2, 1)
output_audio = []
log_s_list = []
log_det_W_list = []
for k in range(self.n_flows):
if k % self.n_early_every == 0 and k > 0:
output_audio.append(audio[:, : self.n_early_size, :])
audio = audio[:, self.n_early_size :, :]
audio, log_det_W = self.convinv[k](audio)
log_det_W_list.append(log_det_W)
n_half = int(audio.size(1) / 2)
audio_0 = audio[:, :n_half, :]
audio_1 = audio[:, n_half:, :]
output = self.WN[k]((audio_0, spect))
log_s = output[:, n_half:, :]
b = output[:, :n_half, :]
audio_1 = torch.exp(log_s) * audio_1 + b
log_s_list.append(log_s)
audio = torch.cat([audio_0, audio_1], 1)
output_audio.append(audio)
return torch.cat(output_audio, 1), log_s_list, log_det_W_list
def infer(self, spect, sigma=1.0):
spect = self.upsample(spect)
# trim conv artifacts. maybe pad spec to kernel multiple
time_cutoff = self.upsample.kernel_size[0] - self.upsample.stride[0]
spect = spect[:, :, :-time_cutoff]
spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3)
spect = spect.contiguous().view(spect.size(0), spect.size(1), -1).permute(0, 2, 1)
if spect.type() == "torch.cuda.HalfTensor":
audio = torch.cuda.HalfTensor(spect.size(0), self.n_remaining_channels, spect.size(2)).normal_()
else:
if torch.cuda.is_available():
audio = torch.cuda.FloatTensor(spect.size(0), self.n_remaining_channels, spect.size(2)).normal_()
else:
audio = torch.FloatTensor(spect.size(0), self.n_remaining_channels, spect.size(2)).normal_()
audio = torch.autograd.Variable(sigma * audio)
for k in reversed(range(self.n_flows)):
n_half = int(audio.size(1) / 2)
audio_0 = audio[:, :n_half, :]
audio_1 = audio[:, n_half:, :]
output = self.WN[k]((audio_0, spect))
s = output[:, n_half:, :]
b = output[:, :n_half, :]
audio_1 = (audio_1 - b) / torch.exp(s)
audio = torch.cat([audio_0, audio_1], 1)
audio = self.convinv[k](audio, reverse=True)
if k % self.n_early_every == 0 and k > 0:
if spect.type() == "torch.cuda.HalfTensor":
z = torch.cuda.HalfTensor(spect.size(0), self.n_early_size, spect.size(2)).normal_()
else:
if torch.cuda.is_available():
z = torch.cuda.FloatTensor(spect.size(0), self.n_early_size, spect.size(2)).normal_()
else:
z = torch.FloatTensor(spect.size(0), self.n_early_size, spect.size(2)).normal_()
audio = torch.cat((sigma * z, audio), 1)
audio = audio.permute(0, 2, 1).contiguous().view(audio.size(0), -1).data
return audio
@staticmethod
def remove_weightnorm(model):
waveglow = model
for WN in waveglow.WN:
WN.start = torch.nn.utils.remove_weight_norm(WN.start)
WN.in_layers = remove(WN.in_layers)
WN.cond_layer = torch.nn.utils.remove_weight_norm(WN.cond_layer)
WN.res_skip_layers = remove(WN.res_skip_layers)
return waveglow
def remove(conv_list):
new_conv_list = torch.nn.ModuleList()
for old_conv in conv_list:
old_conv = torch.nn.utils.remove_weight_norm(old_conv)
new_conv_list.append(old_conv)
return new_conv_list
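# Typical inference usage (a sketch; the checkpoint path and its layout are assumptions,
# and mel_spectrogram is a placeholder tensor of shape batch x n_mel_channels x frames):
# waveglow = torch.load("waveglow_checkpoint.pt", map_location="cpu")["model"]
# waveglow = WaveGlow.remove_weightnorm(waveglow).eval()
# with torch.no_grad():
#     audio = waveglow.infer(mel_spectrogram, sigma=0.6)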
| [
"torch.sigmoid",
"torch.cat",
"torch.nn.ConvTranspose1d",
"torch.nn.ModuleList",
"torch.nn.Conv1d",
"torch.IntTensor",
"torch.autograd.Variable",
"torch.nn.utils.remove_weight_norm",
"torch.FloatTensor",
"torch.det",
"torch.nn.functional.conv1d",
"torch.cuda.is_available",
"torch.logdet",
"torch.zeros_like",
"torch.tanh",
"torch.nn.utils.weight_norm",
"torch.exp",
"torch.sum"
] | 1.9.0 | brooklynbagel/Voice-Cloning-App | 6e0034dc0b4e21f669d28753b5f30b32cca382ad |
1.8 | import warnings
from typing import Any, Dict, Optional, Type, Union
import numpy as np
import torch as th
from mod_gym.gym import spaces
from torch.nn import functional as F
from mod_stable_baselines3.stable_baselines3.common.on_policy_algorithm import OnPolicyAlgorithm
from mod_stable_baselines3.stable_baselines3.common.policies import ActorCriticPolicy
from mod_stable_baselines3.stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
from mod_stable_baselines3.stable_baselines3.common.utils import explained_variance, get_schedule_fn
class PPO(OnPolicyAlgorithm):
"""
Proximal Policy Optimization algorithm (PPO) (clip version)
Paper: https://arxiv.org/abs/1707.06347
Code: This implementation borrows code from OpenAI Spinning Up (https://github.com/openai/spinningup/)
https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail and
Stable Baselines (PPO2 from https://github.com/hill-a/stable-baselines)
Introduction to PPO: https://spinningup.openai.com/en/latest/algorithms/ppo.html
:param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
:param env: The environment to learn from (if registered in Gym, can be str)
:param learning_rate: The learning rate, it can be a function
of the current progress remaining (from 1 to 0)
:param n_steps: The number of steps to run for each environment per update
(i.e. rollout buffer size is n_steps * n_envs where n_envs is number of environment copies running in parallel)
NOTE: n_steps * n_envs must be greater than 1 (because of the advantage normalization)
See https://github.com/pytorch/pytorch/issues/29372
:param batch_size: Minibatch size
:param n_epochs: Number of epochs when optimizing the surrogate loss
:param gamma: Discount factor
:param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator
:param clip_range: Clipping parameter, it can be a function of the current progress
remaining (from 1 to 0).
:param clip_range_vf: Clipping parameter for the value function,
it can be a function of the current progress remaining (from 1 to 0).
This is a parameter specific to the OpenAI implementation. If None is passed (default),
no clipping will be done on the value function.
IMPORTANT: this clipping depends on the reward scaling.
:param ent_coef: Entropy coefficient for the loss calculation
:param vf_coef: Value function coefficient for the loss calculation
:param max_grad_norm: The maximum value for the gradient clipping
:param use_sde: Whether to use generalized State Dependent Exploration (gSDE)
instead of action noise exploration (default: False)
:param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE
Default: -1 (only sample at the beginning of the rollout)
:param target_kl: Limit the KL divergence between updates,
because the clipping is not enough to prevent large update
see issue #213 (cf https://github.com/hill-a/stable-baselines/issues/213)
By default, there is no limit on the kl div.
:param tensorboard_log: the log location for tensorboard (if None, no logging)
:param create_eval_env: Whether to create a second environment that will be
used for evaluating the agent periodically. (Only available when passing string for the environment)
:param policy_kwargs: additional arguments to be passed to the policy on creation
:param verbose: the verbosity level: 0 no output, 1 info, 2 debug
:param seed: Seed for the pseudo random generators
:param device: Device (cpu, cuda, ...) on which the code should be run.
Setting it to auto, the code will be run on the GPU if possible.
:param _init_setup_model: Whether or not to build the network at the creation of the instance
"""
def __init__(
self,
policy: Union[str, Type[ActorCriticPolicy]],
env: Union[GymEnv, str],
learning_rate: Union[float, Schedule] = 3e-4,
n_steps: int = 2048,
batch_size: Optional[int] = 64,
n_epochs: int = 10,
gamma: float = 0.99,
gae_lambda: float = 0.95,
clip_range: Union[float, Schedule] = 0.2,
clip_range_vf: Union[None, float, Schedule] = None,
ent_coef: float = 0.0,
vf_coef: float = 0.5,
max_grad_norm: float = 0.5,
use_sde: bool = False,
sde_sample_freq: int = -1,
target_kl: Optional[float] = None,
tensorboard_log: Optional[str] = None,
create_eval_env: bool = False,
policy_kwargs: Optional[Dict[str, Any]] = None,
verbose: int = 0,
seed: Optional[int] = None,
device: Union[th.device, str] = "auto",
_init_setup_model: bool = True,
):
super(PPO, self).__init__(
policy,
env,
learning_rate=learning_rate,
n_steps=n_steps,
gamma=gamma,
gae_lambda=gae_lambda,
ent_coef=ent_coef,
vf_coef=vf_coef,
max_grad_norm=max_grad_norm,
use_sde=use_sde,
sde_sample_freq=sde_sample_freq,
tensorboard_log=tensorboard_log,
policy_kwargs=policy_kwargs,
verbose=verbose,
device=device,
create_eval_env=create_eval_env,
seed=seed,
_init_setup_model=False,
supported_action_spaces=(
spaces.Box,
spaces.Discrete,
spaces.MultiDiscrete,
spaces.MultiBinary,
),
)
# Sanity check, otherwise it will lead to noisy gradient and NaN
# because of the advantage normalization
assert (
batch_size > 1
), "`batch_size` must be greater than 1. See https://github.com/DLR-RM/stable-baselines3/issues/440"
if self.env is not None:
# Check that `n_steps * n_envs > 1` to avoid NaN
# when doing advantage normalization
buffer_size = self.env.num_envs * self.n_steps
assert (
buffer_size > 1
), f"`n_steps * n_envs` must be greater than 1. Currently n_steps={self.n_steps} and n_envs={self.env.num_envs}"
# Check that the rollout buffer size is a multiple of the mini-batch size
untruncated_batches = buffer_size // batch_size
if buffer_size % batch_size > 0:
warnings.warn(
f"You have specified a mini-batch size of {batch_size},"
f" but because the `RolloutBuffer` is of size `n_steps * n_envs = {buffer_size}`,"
f" after every {untruncated_batches} untruncated mini-batches,"
f" there will be a truncated mini-batch of size {buffer_size % batch_size}\n"
f"We recommend using a `batch_size` that is a factor of `n_steps * n_envs`.\n"
f"Info: (n_steps={self.n_steps} and n_envs={self.env.num_envs})"
)
self.batch_size = batch_size
self.n_epochs = n_epochs
self.clip_range = clip_range
self.clip_range_vf = clip_range_vf
self.target_kl = target_kl
if _init_setup_model:
self._setup_model()
def _setup_model(self) -> None:
super(PPO, self)._setup_model()
# Initialize schedules for policy/value clipping
self.clip_range = get_schedule_fn(self.clip_range)
if self.clip_range_vf is not None:
if isinstance(self.clip_range_vf, (float, int)):
assert self.clip_range_vf > 0, "`clip_range_vf` must be positive, " "pass `None` to deactivate vf clipping"
self.clip_range_vf = get_schedule_fn(self.clip_range_vf)
def train(self) -> None:
"""
Update policy using the currently gathered rollout buffer.
"""
# Update optimizer learning rate
self._update_learning_rate(self.policy.optimizer)
# Compute current clip range
clip_range = self.clip_range(self._current_progress_remaining)
# Optional: clip range for the value function
if self.clip_range_vf is not None:
clip_range_vf = self.clip_range_vf(self._current_progress_remaining)
entropy_losses = []
pg_losses, value_losses = [], []
clip_fractions = []
continue_training = True
# train for n_epochs epochs
for epoch in range(self.n_epochs):
approx_kl_divs = []
# Do a complete pass on the rollout buffer
for rollout_data in self.rollout_buffer.get(self.batch_size):
actions = rollout_data.actions
if isinstance(self.action_space, spaces.Discrete):
# Convert discrete action from float to long
actions = rollout_data.actions.long().flatten()
# Re-sample the noise matrix because the log_std has changed
# TODO: investigate why there is no issue with the gradient
# if that line is commented (as in SAC)
if self.use_sde:
self.policy.reset_noise(self.batch_size)
values, log_prob, entropy = self.policy.evaluate_actions(rollout_data.observations, actions)
values = values.flatten()
# Normalize advantage
advantages = rollout_data.advantages
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
# ratio between old and new policy, should be one at the first iteration
ratio = th.exp(log_prob - rollout_data.old_log_prob)
# clipped surrogate loss
policy_loss_1 = advantages * ratio
policy_loss_2 = advantages * th.clamp(ratio, 1 - clip_range, 1 + clip_range)
policy_loss = -th.min(policy_loss_1, policy_loss_2).mean()
# Logging
pg_losses.append(policy_loss.item())
clip_fraction = th.mean((th.abs(ratio - 1) > clip_range).float()).item()
clip_fractions.append(clip_fraction)
if self.clip_range_vf is None:
# No clipping
values_pred = values
else:
# Clip the difference between old and new value
# NOTE: this depends on the reward scaling
values_pred = rollout_data.old_values + th.clamp(
values - rollout_data.old_values, -clip_range_vf, clip_range_vf
)
# Value loss using the TD(gae_lambda) target
value_loss = F.mse_loss(rollout_data.returns, values_pred)
value_losses.append(value_loss.item())
# Entropy loss favor exploration
if entropy is None:
# Approximate entropy when no analytical form
entropy_loss = -th.mean(-log_prob)
else:
entropy_loss = -th.mean(entropy)
entropy_losses.append(entropy_loss.item())
loss = policy_loss + self.ent_coef * entropy_loss + self.vf_coef * value_loss
# Calculate approximate form of reverse KL Divergence for early stopping
# see issue #417: https://github.com/DLR-RM/stable-baselines3/issues/417
# and discussion in PR #419: https://github.com/DLR-RM/stable-baselines3/pull/419
# and Schulman blog: http://joschu.net/blog/kl-approx.html
with th.no_grad():
log_ratio = log_prob - rollout_data.old_log_prob
approx_kl_div = th.mean((th.exp(log_ratio) - 1) - log_ratio).cpu().numpy()
approx_kl_divs.append(approx_kl_div)
if self.target_kl is not None and approx_kl_div > 1.5 * self.target_kl:
continue_training = False
if self.verbose >= 1:
print(f"Early stopping at step {epoch} due to reaching max kl: {approx_kl_div:.2f}")
break
# Optimization step
self.policy.optimizer.zero_grad()
loss.backward()
# Clip grad norm
th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)
self.policy.optimizer.step()
if not continue_training:
break
self._n_updates += self.n_epochs
explained_var = explained_variance(self.rollout_buffer.values.flatten(), self.rollout_buffer.returns.flatten())
# Logs
self.logger.record("train/entropy_loss", np.mean(entropy_losses))
self.logger.record("train/policy_gradient_loss", np.mean(pg_losses))
self.logger.record("train/value_loss", np.mean(value_losses))
self.logger.record("train/approx_kl", np.mean(approx_kl_divs))
self.logger.record("train/clip_fraction", np.mean(clip_fractions))
self.logger.record("train/loss", loss.item())
self.logger.record("train/explained_variance", explained_var)
if hasattr(self.policy, "log_std"):
self.logger.record("train/std", th.exp(self.policy.log_std).mean().item())
self.logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
self.logger.record("train/clip_range", clip_range)
if self.clip_range_vf is not None:
self.logger.record("train/clip_range_vf", clip_range_vf)
def learn(
self,
total_timesteps: int,
callback: MaybeCallback = None,
log_interval: int = 1,
eval_env: Optional[GymEnv] = None,
eval_freq: int = -1,
n_eval_episodes: int = 5,
tb_log_name: str = "PPO",
eval_log_path: Optional[str] = None,
reset_num_timesteps: bool = True,
) -> "PPO":
return super(PPO, self).learn(
total_timesteps=total_timesteps,
callback=callback,
log_interval=log_interval,
eval_env=eval_env,
eval_freq=eval_freq,
n_eval_episodes=n_eval_episodes,
tb_log_name=tb_log_name,
eval_log_path=eval_log_path,
reset_num_timesteps=reset_num_timesteps,
)
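# Minimal usage sketch (hyperparameters are illustrative; the import path assumes the
# fork mirrors upstream stable-baselines3):
# from mod_stable_baselines3.stable_baselines3.common.env_util import make_vec_env
# env = make_vec_env("CartPole-v1", n_envs=4)
# model = PPO("MlpPolicy", env, n_steps=128, batch_size=64, verbose=1)
# model.learn(total_timesteps=100_000)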
| [
"torch.min",
"torch.no_grad",
"torch.clamp",
"torch.nn.functional.mse_loss",
"torch.abs",
"torch.exp",
"torch.mean"
] | 1.8.1 | Practical-Formal-Methods/mod_stable_baselines3 | 08bdb0a529c8ab446ac7973f2a02f832c0c3f454 |
1.8 | # Copyright 2021 cstsunfu. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.nn as nn
import torch
from typing import Dict, List
from . import module_register, module_config_register, Module
from dlk.utils.config import BaseConfig
@module_config_register("biaffine")
class BiAffineConfig(BaseConfig):
"""Config for BiAffine
Config Example:
>>> {
>>> "config": {
>>> "input_size": 256,
>>> "output_size": 2,
>>> "dropout": 0.0, //generally no need dropout
>>> "bias": true, // use bias or not in biaffine
>>> },
>>> "_name": "biaffine",
>>> }
"""
def __init__(self, config: Dict):
super(BiAffineConfig, self).__init__(config)
config = config['config']
self.input_size = config['input_size']
self.output_size = config['output_size']
self.dropout = float(config['dropout'])
self.bias = config['bias']
self.post_check(config, used=[
"input_size",
"output_size",
"dropout",
"bias",
])
@module_register("biaffine")
class BiAffine(Module):
"""wrap for nn.BiAffine"""
def __init__(self, config: BiAffineConfig):
super(BiAffine, self).__init__()
if config.bias:
self.biaffine = nn.Parameter(torch.randn(config.input_size+1, config.output_size, config.input_size+1))
else:
self.biaffine = nn.Parameter(torch.randn(config.input_size, config.output_size, config.input_size))
self.dropout = nn.Dropout(p=float(config.dropout))
self.config = config
def init_weight(self, method):
"""init the weight of submodules by 'method'
Args:
method: init method
Returns:
None
"""
torch.nn.init.xavier_uniform_(self.biaffine)
def forward(self, input_a: torch.Tensor, input_b: torch.Tensor)->torch.Tensor:
"""do forward on a mini batch
Args:
input_a: a mini batch inputs_a, shape==(batch_size, input_a_len, input_size)
input_b: a mini batch inputs_b, shape==(batch_size, input_b_len, input_size)
Returns:
input_a x biaffine x input_b, shape==(batch_size, input_a_len, input_b_len, output_size)
"""
if self.config.bias:
output = self.dropout(torch.einsum('bmi,ioj,bnj->bmno',
torch.cat((input_a, torch.ones_like(input_a[..., :1])), dim=-1),
self.biaffine,
torch.cat((input_b, torch.ones_like(input_b[..., :1])), dim=-1)
))
else:
output = self.dropout(torch.einsum('bmi,ioj,bnj->bmno',
input_a,
self.biaffine,
input_b,
))
return output
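# Illustrative usage (shapes only; the config values follow the docstring example above):
# cfg = BiAffineConfig({"config": {"input_size": 256, "output_size": 2,
#                                  "dropout": 0.0, "bias": True}, "_name": "biaffine"})
# biaffine = BiAffine(cfg)
# out = biaffine(torch.randn(2, 10, 256), torch.randn(2, 12, 256))  # (2, 10, 12, 2)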
| [
"torch.ones_like",
"torch.nn.init.xavier_uniform_",
"torch.randn",
"torch.einsum"
] | 1.8.2 | cstsunfu/dlkit | 69e0efd372fa5c0ae5313124d0ba1ef55b535196 |
1.8 | '''
Accelerate demo with fp16 and multi-gpu support.
Single CPU:
python accelerate_demo.py --cpu
16-bit Floating Point:
python accelerate_demo.py --fp16
Model from timm:
python accelerate_demo.py --timm
Single-GPU:
python accelerate_demo.py
Multi-GPU or Multi-CPU:
accelerate config
accelerate launch accelerate_demo.py
'''
import torch
import wandb
import datetime
import timm
import torchvision
import argparse
from torch.optim import SGD
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from ui import progress_bar
from accelerate import Accelerator
def init_wandb():
wandb.login()
config = {
"learning_rate": 0.1,
"epochs": 100,
"batch_size": 128,
"dataset": "cifar10"
}
run = wandb.init(project="accelerate-options-project", entity="upeee", config=config)
return run
def run_experiment(args):
accelerator = Accelerator(fp16=args.fp16, cpu=args.cpu)
_ = init_wandb()
# With timm, no need to manually replace the classifier head.
# Just initialize the model with the correct number of classes.
# However, timm model has a lower accuracy (TODO: why?)
if args.timm:
model = timm.create_model('resnet18', pretrained=False, num_classes=10)
else:
model = torchvision.models.resnet18(pretrained=False, progress=True)
model.fc = torch.nn.Linear(model.fc.in_features, 10)
# wandb will automatically log the model gradients.
wandb.watch(model)
loss = torch.nn.CrossEntropyLoss()
optimizer = SGD(model.parameters(), lr=wandb.config.learning_rate)
scheduler = CosineAnnealingLR(optimizer, T_max=wandb.config.epochs)
x_train = datasets.CIFAR10(root='./data', train=True,
download=True,
transform=transforms.ToTensor())
x_test = datasets.CIFAR10(root='./data',
train=False,
download=True,
transform=transforms.ToTensor())
train_loader = DataLoader(x_train,
batch_size=wandb.config.batch_size,
shuffle=True,
num_workers=2)
test_loader = DataLoader(x_test,
batch_size=wandb.config.batch_size,
shuffle=False,
num_workers=2)
label_human = ["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
table_test = wandb.Table(columns=['Image', "Ground Truth", "Initial Pred Label",])
image, label = next(iter(test_loader))
image = image.to(accelerator.device)
# Accelerate API
model = accelerator.prepare(model)
optimizer = accelerator.prepare(optimizer)
scheduler = accelerator.prepare(scheduler)
train_loader = accelerator.prepare(train_loader)
test_loader = accelerator.prepare(test_loader)
model.eval()
with torch.no_grad():
pred = torch.argmax(model(image), dim=1).cpu().numpy()
for i in range(8):
table_test.add_data(wandb.Image(image[i]),
label_human[label[i]],
label_human[pred[i]])
accelerator.print(label_human[label[i]], "vs. ", label_human[pred[i]])
start_time = datetime.datetime.now()
best_acc = 0
for epoch in range(wandb.config["epochs"]):
train_acc, train_loss = train(epoch, model, optimizer, scheduler, train_loader, loss, accelerator)
test_acc, test_loss = test(model, test_loader, loss, accelerator)
if test_acc > best_acc:
wandb.run.summary["Best accuracy"] = test_acc
best_acc = test_acc
if args.fp16:
accelerator.save(model.state_dict(), "./resnet18_best_acc_fp16.pth")
else:
accelerator.save(model, "./resnet18_best_acc.pth")
wandb.log({
"Train accuracy": train_acc,
"Test accuracy": test_acc,
"Train loss": train_loss,
"Test loss": test_loss,
"Learning rate": optimizer.param_groups[0]['lr']
})
elapsed_time = datetime.datetime.now() - start_time
accelerator.print("Elapsed time: %s" % elapsed_time)
wandb.run.summary["Elapsed train time"] = str(elapsed_time)
wandb.run.summary["Fp16 enabled"] = str(args.fp16)
wandb.run.summary["Using timm"] = str(args.timm)
wandb.run.summary["Using CPU"] = str(args.cpu)
model.eval()
with torch.no_grad():
pred = torch.argmax(model(image), dim=1).cpu().numpy()
final_pred = []
for i in range(8):
final_pred.append(label_human[pred[i]])
accelerator.print(label_human[label[i]], "vs. ", final_pred[i])
table_test.add_column(name="Final Pred Label", data=final_pred)
wandb.log({"Test data": table_test})
wandb.finish()
def train(epoch, model, optimizer, scheduler, train_loader, loss, accelerator):
model.train()
train_loss = 0
correct = 0
train_samples = 0
# sample a batch. compute loss and backpropagate
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
output = model(data)
loss_value = loss(output, target)
accelerator.backward(loss_value)
optimizer.step()
scheduler.step(epoch)
train_loss += loss_value.item()
train_samples += len(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
if batch_idx % 10 == 0:
accuracy = 100. * correct / len(train_loader.dataset)
progress_bar(batch_idx,
len(train_loader),
'Train Epoch: {}, Loss: {:0.2e}, Acc: {:.2f}%'.format(epoch+1,
train_loss/train_samples, accuracy))
train_loss /= len(train_loader.dataset)
accuracy = 100. * correct / len(train_loader.dataset)
return accuracy, train_loss
def test(model, test_loader, loss, accelerator):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
output = model(data)
test_loss += loss(output, target).item()
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
accuracy = 100. * correct / len(test_loader.dataset)
accelerator.print('\nTest Loss: {:.4f}, Acc: {:.2f}%\n'.format(test_loss, accuracy))
return accuracy, test_loss
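# Note (an assumption about newer Accelerate releases, not the version used here): the
# fp16 flag was later replaced by a mixed_precision argument, e.g.
# accelerator = Accelerator(mixed_precision="fp16" if args.fp16 else "no", cpu=args.cpu)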
def main():
parser = argparse.ArgumentParser(description="Simple example of training script.")
parser.add_argument("--timm", action="store_true", help="If passed, build model using timm library.")
parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
# Seems that this is not supported in the Accelerator version installed
parser.add_argument(
"--mixed_precision",
type=str,
default="no",
choices=["no", "fp16", "bf16"],
help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU.",
)
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
args = parser.parse_args()
run_experiment(args)
if __name__ == "__main__":
main() | [
"torch.nn.Linear",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.no_grad",
"torch.utils.data.DataLoader",
"torch.nn.CrossEntropyLoss"
] | 1.8.0 | Cahlil-Togonon/Deep-Learning-Experiments | 501ae610b0a8fb7fb75a53dcfdab71be49274b58 |
1.3 | import platform
import pytest
import torch
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Subset
import tests.base.utils as tutils
from pytorch_lightning import Trainer
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.base import EvalModelTemplate
def test_fit_train_loader_only(tmpdir):
model = EvalModelTemplate()
train_dataloader = model.train_dataloader()
model.train_dataloader = None
model.val_dataloader = None
model.test_dataloader = None
model.validation_step = None
model.validation_epoch_end = None
model.test_step = None
model.test_epoch_end = None
trainer = Trainer(fast_dev_run=True, default_root_dir=tmpdir)
trainer.fit(model, train_dataloader=train_dataloader)
def test_fit_val_loader_only(tmpdir):
model = EvalModelTemplate()
train_dataloader = model.train_dataloader()
val_dataloader = model.val_dataloader()
model.train_dataloader = None
model.val_dataloader = None
model.test_dataloader = None
model.test_step = None
model.test_epoch_end = None
trainer = Trainer(fast_dev_run=True, default_root_dir=tmpdir)
trainer.fit(model, train_dataloader=train_dataloader, val_dataloaders=val_dataloader)
@pytest.mark.parametrize("dataloader_options", [
dict(train_percent_check=-0.1),
dict(train_percent_check=1.1),
dict(val_check_interval=1.1),
dict(val_check_interval=10000),
])
def test_dataloader_config_errors(tmpdir, dataloader_options):
model = EvalModelTemplate()
# fit model
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
**dataloader_options,
)
with pytest.raises(ValueError):
trainer.fit(model)
def test_multiple_val_dataloader(tmpdir):
"""Verify multiple val_dataloader."""
model = EvalModelTemplate()
model.val_dataloader = model.val_dataloader__multiple
model.validation_step = model.validation_step__multiple_dataloaders
# fit model
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
val_percent_check=0.1,
train_percent_check=1.0,
)
result = trainer.fit(model)
# verify training completed
assert result == 1
# verify there are 2 val loaders
assert len(trainer.val_dataloaders) == 2, \
'Multiple val_dataloaders not initiated properly'
# make sure predictions are good for each val set
for dataloader in trainer.val_dataloaders:
tutils.run_prediction(dataloader, trainer.model)
def test_multiple_test_dataloader(tmpdir):
"""Verify multiple test_dataloader."""
model = EvalModelTemplate()
model.test_dataloader = model.test_dataloader__multiple
model.test_step = model.test_step__multiple_dataloaders
# fit model
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
val_percent_check=0.1,
train_percent_check=0.2
)
trainer.fit(model)
trainer.test()
# verify there are 2 test loaders
assert len(trainer.test_dataloaders) == 2, \
'Multiple test_dataloaders not initiated properly'
# make sure predictions are good for each test set
for dataloader in trainer.test_dataloaders:
tutils.run_prediction(dataloader, trainer.model)
# run the test method
trainer.test()
def test_train_dataloader_passed_to_fit(tmpdir):
"""Verify that train dataloader can be passed to fit """
# only train passed to fit
model = EvalModelTemplate()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
val_percent_check=0.1,
train_percent_check=0.2
)
fit_options = dict(train_dataloader=model.dataloader(train=True))
result = trainer.fit(model, **fit_options)
assert result == 1
def test_train_val_dataloaders_passed_to_fit(tmpdir):
""" Verify that train & val dataloader can be passed to fit """
# train, val passed to fit
model = EvalModelTemplate()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
val_percent_check=0.1,
train_percent_check=0.2
)
fit_options = dict(train_dataloader=model.dataloader(train=True),
val_dataloaders=model.dataloader(train=False))
result = trainer.fit(model, **fit_options)
assert result == 1
assert len(trainer.val_dataloaders) == 1, \
f'`val_dataloaders` not initiated properly, got {trainer.val_dataloaders}'
def test_all_dataloaders_passed_to_fit(tmpdir):
"""Verify train, val & test dataloader(s) can be passed to fit and test method"""
model = EvalModelTemplate()
# train, val and test passed to fit
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
val_percent_check=0.1,
train_percent_check=0.2
)
fit_options = dict(train_dataloader=model.dataloader(train=True),
val_dataloaders=model.dataloader(train=False))
test_options = dict(test_dataloaders=model.dataloader(train=False))
result = trainer.fit(model, **fit_options)
trainer.test(**test_options)
assert result == 1
assert len(trainer.val_dataloaders) == 1, \
f'val_dataloaders` not initiated properly, got {trainer.val_dataloaders}'
assert len(trainer.test_dataloaders) == 1, \
f'test_dataloaders` not initiated properly, got {trainer.test_dataloaders}'
def test_multiple_dataloaders_passed_to_fit(tmpdir):
"""Verify that multiple val & test dataloaders can be passed to fit."""
model = EvalModelTemplate()
model.validation_step = model.validation_step__multiple_dataloaders
model.test_step = model.test_step__multiple_dataloaders
# train, multiple val and multiple test passed to fit
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
val_percent_check=0.1,
train_percent_check=0.2
)
fit_options = dict(train_dataloader=model.dataloader(train=True),
val_dataloaders=[model.dataloader(train=False),
model.dataloader(train=False)])
test_options = dict(test_dataloaders=[model.dataloader(train=False),
model.dataloader(train=False)])
trainer.fit(model, **fit_options)
trainer.test(**test_options)
assert len(trainer.val_dataloaders) == 2, \
f'Multiple `val_dataloaders` not initiated properly, got {trainer.val_dataloaders}'
assert len(trainer.test_dataloaders) == 2, \
f'Multiple `test_dataloaders` not initiated properly, got {trainer.test_dataloaders}'
def test_mixing_of_dataloader_options(tmpdir):
"""Verify that dataloaders can be passed to fit"""
model = EvalModelTemplate()
trainer_options = dict(
default_root_dir=tmpdir,
max_epochs=1,
val_percent_check=0.1,
train_percent_check=0.2
)
# fit model
trainer = Trainer(**trainer_options)
results = trainer.fit(model, val_dataloaders=model.dataloader(train=False))
assert results
# fit model
trainer = Trainer(**trainer_options)
results = trainer.fit(model, val_dataloaders=model.dataloader(train=False))
assert results
trainer.test(test_dataloaders=model.dataloader(train=False))
assert len(trainer.val_dataloaders) == 1, \
f'`val_dataloaders` not initiated properly, got {trainer.val_dataloaders}'
assert len(trainer.test_dataloaders) == 1, \
f'`test_dataloaders` not initiated properly, got {trainer.test_dataloaders}'
def test_train_inf_dataloader_error(tmpdir):
"""Test inf train data loader (e.g. IterableDataset)"""
model = EvalModelTemplate()
model.train_dataloader = model.train_dataloader__infinite
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, val_check_interval=0.5)
with pytest.raises(MisconfigurationException, match='infinite DataLoader'):
trainer.fit(model)
def test_val_inf_dataloader_error(tmpdir):
"""Test inf train data loader (e.g. IterableDataset)"""
model = EvalModelTemplate()
model.val_dataloader = model.val_dataloader__infinite
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, val_percent_check=0.5)
with pytest.raises(MisconfigurationException, match='infinite DataLoader'):
trainer.fit(model)
def test_test_inf_dataloader_error(tmpdir):
"""Test inf train data loader (e.g. IterableDataset)"""
model = EvalModelTemplate()
model.test_dataloader = model.test_dataloader__infinite
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, test_percent_check=0.5)
with pytest.raises(MisconfigurationException, match='infinite DataLoader'):
trainer.test(model)
@pytest.mark.parametrize('check_interval', [50, 1.0])
def test_inf_train_dataloader(tmpdir, check_interval):
"""Test inf train data loader (e.g. IterableDataset)"""
model = EvalModelTemplate()
model.train_dataloader = model.train_dataloader__infinite
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
val_check_interval=check_interval
)
result = trainer.fit(model)
# verify training completed
assert result == 1
@pytest.mark.parametrize('check_interval', [1.0])
def test_inf_val_dataloader(tmpdir, check_interval):
"""Test inf val data loader (e.g. IterableDataset)"""
model = EvalModelTemplate()
model.val_dataloader = model.val_dataloader__infinite
# logger file to get meta
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
val_check_interval=check_interval,
)
result = trainer.fit(model)
# verify training completed
assert result == 1
def test_error_on_zero_len_dataloader(tmpdir):
""" Test that error is raised if a zero-length dataloader is defined """
model = EvalModelTemplate()
model.train_dataloader = model.train_dataloader__zero_length
# fit model
with pytest.raises(ValueError):
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
test_percent_check=0.5
)
trainer.fit(model)
@pytest.mark.skipif(platform.system() == 'Windows', reason='Does not apply to Windows platform.')
def test_warning_with_few_workers(tmpdir):
""" Test that error is raised if dataloader with only a few workers is used """
model = EvalModelTemplate()
# logger file to get meta
trainer_options = dict(
default_root_dir=tmpdir,
max_epochs=1,
val_percent_check=0.1,
train_percent_check=0.2
)
fit_options = dict(train_dataloader=model.dataloader(train=True),
val_dataloaders=model.dataloader(train=False))
test_options = dict(test_dataloaders=model.dataloader(train=False))
trainer = Trainer(**trainer_options)
# fit model
with pytest.warns(UserWarning, match='train'):
trainer.fit(model, **fit_options)
with pytest.warns(UserWarning, match='val'):
trainer.fit(model, **fit_options)
with pytest.warns(UserWarning, match='test'):
trainer.test(**test_options)
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason='Test requires multiple GPUs')
def test_dataloader_reinit_for_subclass():
class CustomDataLoader(torch.utils.data.DataLoader):
def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None,
batch_sampler=None, num_workers=0, collate_fn=None,
pin_memory=False, drop_last=False, timeout=0,
worker_init_fn=None, dummy_kwarg=None):
super().__init__(dataset, batch_size, shuffle, sampler, batch_sampler,
num_workers, collate_fn, pin_memory, drop_last, timeout,
worker_init_fn)
self.dummy_kwarg = dummy_kwarg
trainer = Trainer(
gpus=[0, 1],
num_nodes=1,
distributed_backend='ddp',
)
class CustomDummyObj:
sampler = None
result = trainer.auto_add_sampler(CustomDummyObj(), train=True)
assert isinstance(result, CustomDummyObj), "Wrongly reinstantiated data loader"
result = trainer.auto_add_sampler(CustomDataLoader(list(range(1000))), train=True)
assert isinstance(result, torch.utils.data.DataLoader)
assert isinstance(result, CustomDataLoader)
assert hasattr(result, 'dummy_kwarg')
@pytest.mark.skipif(torch.cuda.device_count() < 3, reason='Test requires multiple GPUs')
def test_batch_size_smaller_than_num_gpus():
# we need at least 3 gpus for this test
num_gpus = 3
batch_size = 3
class CurrentTestModel(EvalModelTemplate):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# batch norm doesn't work with batch size 1, we replace it
self.c_d1_bn = torch.nn.ReLU()
def training_step(self, *args, **kwargs):
output = super().training_step(*args, **kwargs)
loss = output['loss']
# we make sure to add some metrics to the output dict,
# this is essential for this test
output['progress_bar'] = {'train_loss': loss}
return output
def train_dataloader(self):
dataloader = super().train_dataloader()
# construct a dataset with a size that is not divisible by num_gpus
# therefore the last batch will have a size < num_gpus
size = num_gpus * batch_size + (num_gpus - 1)
dataset = Subset(dataloader.dataset, range(size))
dataloader = DataLoader(
dataset,
batch_size=self.hparams.batch_size,
drop_last=False,
)
return dataloader
hparams = EvalModelTemplate.get_default_hparams()
hparams.batch_size = batch_size
model = CurrentTestModel(hparams)
trainer = Trainer(
max_epochs=1,
val_percent_check=0,
gpus=num_gpus,
)
# we expect the reduction for the metrics also to happen on the last batch
# where we will get fewer metrics than gpus
result = trainer.fit(model)
assert 1 == result
| [
"torch.nn.ReLU",
"torch.utils.data.dataloader.DataLoader",
"torch.cuda.device_count"
] | 1.3 | binshengliu/pytorch-lightning | 8f6b7a2b4fea9b7bd0b873f5973e6364b3981412 |
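A minimal plain-PyTorch sketch of two patterns the tests above exercise: a DataLoader subclass that carries an extra keyword argument (which a framework must preserve when it re-instantiates the loader), and a dataset whose length is not divisible by the device count, so the final batch comes up short. The class and attribute names mirror the test; the toy tensors and sizes are illustrative only.

import torch
from torch.utils.data import DataLoader, Subset, TensorDataset

class CustomDataLoader(DataLoader):
    def __init__(self, dataset, dummy_kwarg=None, **kwargs):
        super().__init__(dataset, **kwargs)
        self.dummy_kwarg = dummy_kwarg  # extra attribute that should survive re-instantiation

num_devices, batch_size = 3, 3
# length = num_devices * batch_size + (num_devices - 1), so the last batch has only 2 samples
dataset = TensorDataset(torch.randn(num_devices * batch_size + (num_devices - 1), 4))
loader = CustomDataLoader(Subset(dataset, range(len(dataset))),
                          dummy_kwarg="keep-me", batch_size=batch_size)
print(loader.dummy_kwarg, [batch[0].shape[0] for batch in loader])  # 'keep-me' [3, 3, 3, 2]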
0.4 | '''
Script to train the ranker
Should add some sort of image pool someday...?
'''
import time
from options.train_options import TrainOptions
from data import CreateDataLoader
from models import create_model
from util.visualizer import Visualizer
from models import networks
import pdb
import torch
from collections import OrderedDict
def load_chkpt(network, fname):
chkpt = torch.load(fname)
new_chkpt = OrderedDict()
for k, v in chkpt.items():
name = 'module.' + k # add `module`
new_chkpt[name] = v
network.load_state_dict(new_chkpt)
if __name__ == '__main__':
opt = TrainOptions().parse()
data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
dataset_size = len(data_loader)
model = create_model(opt)
model.setup(opt)
visualizer = Visualizer(opt)
total_steps = 0
'''
chkpt_D = torch.load('checkpoints/streetview_throttled/15_net_D.pth')
chkpt_G = torch.load('checkpoints/streetview_throttled/15_net_G.pth')
new_chkpt_D = OrderedDict()
new_chkpt_G = OrderedDict()
for k, v in chkpt_D.items():
name = 'module.' + k # add `module`
new_chkpt_D[name] = v
for k, v in chkpt_G.items():
name = 'module.' + k # add `module`
new_chkpt_G[name] = v
model.netD.load_state_dict(new_chkpt_D)
model.netG.load_state_dict(new_chkpt_G)
'''
G_model_chkpts = ['checkpoints/street_decaythrottle45_halflr/1_net_G.pth',
'checkpoints/street_decaythrottle45_halflr/2_net_G.pth',
'checkpoints/street_decaythrottle45_halflr/3_net_G.pth',
'checkpoints/street_decaythrottle45_halflr/4_net_G.pth',
'checkpoints/street_decaythrottle45_halflr/5_net_G.pth',
'checkpoints/street_decaythrottle45_halflr/6_net_G.pth',
'checkpoints/street_decaythrottle45_halflr/7_net_G.pth',
'checkpoints/street_decaythrottle45_halflr/8_net_G.pth',
'checkpoints/street_decaythrottle45_halflr/9_net_G.pth',
'checkpoints/street_decaythrottle45_halflr/10_net_G.pth',
'checkpoints/street_decaythrottle45_halflr/11_net_G.pth',
'checkpoints/street_decaythrottle45_halflr/12_net_G.pth',
'checkpoints/street_decaythrottle45_halflr/13_net_G.pth',
'checkpoints/street_decaythrottle45_halflr/14_net_G.pth',
'checkpoints/street_decaythrottle45_halflr/15_net_G.pth',
'checkpoints/street_decaythrottle45_halflr/16_net_G.pth',
'checkpoints/street_decaythrottle45_halflr/17_net_G.pth',
'checkpoints/street_decaythrottle45_halflr/18_net_G.pth',
'checkpoints/street_decaythrottle45_halflr/19_net_G.pth',
'checkpoints/street_decaythrottle45_halflr/20_net_G.pth']
G_networks = []
for i in range(len(G_model_chkpts)):
netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, opt.gpu_ids)
load_chkpt(netG, G_model_chkpts[i])
G_networks.append(netG)
netGs = networks.RandomNetwork(G_networks)
#load_chkpt(model.netG, 'checkpoints/streetview_throttled/15_net_G.pth')
model.netG = netGs
load_chkpt(model.netD, 'checkpoints/street_decaythrottle45_halflr/20_net_D.pth')
for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):
epoch_start_time = time.time()
iter_data_time = time.time()
epoch_iter = 0
for i, data in enumerate(dataset):
iter_start_time = time.time()
if total_steps % opt.print_freq == 0:
t_data = iter_start_time - iter_data_time
visualizer.reset()
total_steps += opt.batch_size
epoch_iter += opt.batch_size
model.set_input(data)
# optimize only discriminator
model.forward()
model.set_requires_grad(model.netD, True)
model.optimizer_D.zero_grad()
model.backward_D()
model.optimizer_D.step()
model.set_requires_grad(model.netD, False)
# need this to prevent logger from complaining
# because it wants to log the G loss, even though
# we aren't updating it
model.backward_G()
if total_steps % opt.display_freq == 0:
save_result = total_steps % opt.update_html_freq == 0
visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)
if total_steps % opt.print_freq == 0:
losses = model.get_current_losses()
t = (time.time() - iter_start_time) / opt.batch_size
visualizer.print_current_losses(epoch, epoch_iter, losses, t, t_data)
if opt.display_id > 0:
visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, opt, losses)
if total_steps % opt.save_latest_freq == 0:
print('saving the latest model (epoch %d, total_steps %d)' %
(epoch, total_steps))
model.save_networks('latest', saveG=False)
iter_data_time = time.time()
if epoch % opt.save_epoch_freq == 0:
print('saving the model at the end of epoch %d, iters %d' %
(epoch, total_steps))
model.save_networks('latest', saveG=False)
model.save_networks(epoch, saveG=False)
print('End of epoch %d / %d \t Time Taken: %d sec' %
(epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))
model.update_learning_rate()
| [
"torch.load"
] | 0.4.0 | dangeng/infiniteGANorama | 92c9cbe0638cf9fcdc05020759772e36aebf788c |
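A minimal sketch of the discriminator-only update in the loop above: gradients on D are switched on just for its optimizer step, while the frozen generator is used purely for inference. netG, netD, and the BCE losses here are small stand-ins, not the repository's classes.

import torch
import torch.nn as nn
import torch.nn.functional as F

def set_requires_grad(net, flag):
    for p in net.parameters():
        p.requires_grad = flag

netG = nn.Linear(8, 8)   # stand-in generator (would be the frozen RandomNetwork above)
netD = nn.Linear(8, 1)   # stand-in discriminator / ranker
optimizer_D = torch.optim.Adam(netD.parameters(), lr=2e-4)

real = torch.randn(4, 8)
with torch.no_grad():            # generator is inference-only; no G update happens
    fake = netG(torch.randn(4, 8))

set_requires_grad(netD, True)
optimizer_D.zero_grad()
loss_D = F.binary_cross_entropy_with_logits(netD(real), torch.ones(4, 1)) + \
         F.binary_cross_entropy_with_logits(netD(fake), torch.zeros(4, 1))
loss_D.backward()
optimizer_D.step()
set_requires_grad(netD, False)   # freeze D again, mirroring the loop above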
1.5 | #!/usr/bin/env python
"""
Simple implementation for mixup. The loss and onehot functions originate from: https://github.com/moskomule/mixup.pytorch
Hongyi Zhang, Moustapha Cisse, Yann N. Dauphin, David Lopez-Paz: mixup: Beyond Empirical Risk Minimization
https://arxiv.org/abs/1710.09412
"""
__all__ = [ 'mixup_cross_entropy_loss', 'mixup' ]
import numpy as np
import torch
from torch.autograd import Variable
def mixup_cross_entropy_loss(input, target, size_average=True):
"""Origin: https://github.com/moskomule/mixup.pytorch
In PyTorch's cross entropy, targets are expected to be integer class labels,
so this loss is needed to train against (mixed) probability targets.
Suppose q is the target and p is the input; then
loss(p, q) = -\sum_i q_i \log p_i
"""
assert input.size() == target.size()
assert isinstance(input, Variable) and isinstance(target, Variable)
input = torch.log(torch.nn.functional.softmax(input, dim=1).clamp(1e-5, 1))
# input = input - torch.log(torch.sum(torch.exp(input), dim=1)).view(-1, 1)
loss = - torch.sum(input * target)
return loss / input.size()[0] if size_average else loss
def onehot(targets, num_classes):
"""Origin: https://github.com/moskomule/mixup.pytorch
convert index tensor into onehot tensor
:param targets: index tensor
:param num_classes: number of classes
"""
assert isinstance(targets, torch.LongTensor)
return torch.zeros(targets.size()[0], num_classes).scatter_(1, targets.view(-1, 1), 1)
def mixup(inputs, targets, num_classes, alpha=2):
"""Mixup on 1x32x32 mel-spectrograms.
"""
s = inputs.size()[0]
weight = torch.Tensor(np.random.beta(alpha, alpha, s))
index = np.random.permutation(s)
x1, x2 = inputs, inputs[index, :, :, :]
y1, y2 = onehot(targets, num_classes), onehot(targets[index,], num_classes)
weight = weight.view(s, 1, 1, 1)
inputs = weight*x1 + (1-weight)*x2
weight = weight.view(s, 1)
targets = weight*y1 + (1-weight)*y2
return inputs, targets
| [
"torch.nn.functional.softmax",
"torch.sum"
] | 1.5.1 | bozliu/E2E-Keyword-Spotting | 64fc6fe414370a12a22fdf8ca5c8379d2c60b64e |
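A usage sketch for the mixup()/mixup_cross_entropy_loss() pair above, assuming both functions are in scope. The random 1x32x32 "spectrogram" batch and the tiny linear classifier are placeholders.

import torch
import torch.nn as nn

num_classes, batch = 10, 8
inputs = torch.randn(batch, 1, 32, 32)
targets = torch.randint(0, num_classes, (batch,), dtype=torch.long)

mixed_inputs, soft_targets = mixup(inputs, targets, num_classes, alpha=2)

model = nn.Sequential(nn.Flatten(), nn.Linear(32 * 32, num_classes))
loss = mixup_cross_entropy_loss(model(mixed_inputs), soft_targets)
loss.backward()
print(loss.item(), soft_targets.sum(dim=1))  # each mixed target row still sums to 1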
0.4 | """
A :class:`~allennlp.training.trainer.Trainer` is responsible for training a
:class:`~allennlp.models.model.Model`.
Typically you might create a configuration file specifying the model and
training parameters and then use :mod:`~allennlp.commands.train`
rather than instantiating a ``Trainer`` yourself.
"""
# pylint: disable=too-many-lines
import logging
import os
import shutil
import time
import re
import datetime
import traceback
from typing import Dict, Optional, List, Tuple, Union, Iterable, Any, Set
import torch
import torch.optim.lr_scheduler
from torch.nn.parallel import replicate, parallel_apply
from torch.nn.parallel.scatter_gather import scatter_kwargs, gather
from tensorboardX import SummaryWriter
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.common.util import peak_memory_mb, gpu_memory_mb
from allennlp.common.tqdm import Tqdm
from allennlp.data.instance import Instance
from allennlp.data.iterators.data_iterator import DataIterator
from allennlp.models.model import Model
from allennlp.nn import util
from allennlp.training.learning_rate_schedulers import LearningRateScheduler
from allennlp.training.optimizers import Optimizer
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def is_sparse(tensor):
return tensor.is_sparse
def sparse_clip_norm(parameters, max_norm, norm_type=2) -> float:
"""Clips gradient norm of an iterable of parameters.
The norm is computed over all gradients together, as if they were
concatenated into a single vector. Gradients are modified in-place.
Supports sparse gradients.
Parameters
----------
parameters : ``(Iterable[torch.Tensor])``
An iterable of Tensors that will have gradients normalized.
max_norm : ``float``
The max norm of the gradients.
norm_type : ``float``
The type of the used p-norm. Can be ``'inf'`` for infinity norm.
Returns
-------
Total norm of the parameters (viewed as a single vector).
"""
# pylint: disable=invalid-name,protected-access
parameters = list(filter(lambda p: p.grad is not None, parameters))
max_norm = float(max_norm)
norm_type = float(norm_type)
if norm_type == float('inf'):
total_norm = max(p.grad.data.abs().max() for p in parameters)
else:
total_norm = 0
for p in parameters:
if is_sparse(p.grad):
# need to coalesce the repeated indices before finding norm
grad = p.grad.data.coalesce()
param_norm = grad._values().norm(norm_type)
else:
param_norm = p.grad.data.norm(norm_type)
total_norm += param_norm ** norm_type
total_norm = total_norm ** (1. / norm_type)
clip_coef = max_norm / (total_norm + 1e-6)
if clip_coef < 1:
for p in parameters:
if is_sparse(p.grad):
p.grad.data._values().mul_(clip_coef)
else:
p.grad.data.mul_(clip_coef)
return total_norm
def move_optimizer_to_cuda(optimizer):
"""
Move the optimizer state to GPU, if necessary.
After calling, any parameter specific state in the optimizer
will be located on the same device as the parameter.
"""
for param_group in optimizer.param_groups:
for param in param_group['params']:
if param.is_cuda:
param_state = optimizer.state[param]
for k in param_state.keys():
if isinstance(param_state[k], torch.Tensor):
param_state[k] = param_state[k].cuda(device=param.get_device())
class TensorboardWriter:
"""
Wraps a pair of ``SummaryWriter`` instances but is a no-op if they're ``None``.
Allows Tensorboard logging without always checking for Nones first.
"""
def __init__(self, train_log: SummaryWriter = None, validation_log: SummaryWriter = None) -> None:
self._train_log = train_log
self._validation_log = validation_log
@staticmethod
def _item(value: Any):
if hasattr(value, 'item'):
val = value.item()
else:
val = value
return val
def add_train_scalar(self, name: str, value: float, global_step: int) -> None:
# get the scalar
if self._train_log is not None:
self._train_log.add_scalar(name, self._item(value), global_step)
def add_train_histogram(self, name: str, values: torch.Tensor, global_step: int) -> None:
if self._train_log is not None:
if isinstance(values, torch.Tensor):
values_to_write = values.cpu().data.numpy().flatten()
self._train_log.add_histogram(name, values_to_write, global_step)
def add_validation_scalar(self, name: str, value: float, global_step: int) -> None:
if self._validation_log is not None:
self._validation_log.add_scalar(name, self._item(value), global_step)
def time_to_str(timestamp: int) -> str:
"""
Convert seconds past Epoch to human readable string.
"""
datetimestamp = datetime.datetime.fromtimestamp(timestamp)
return '{:04d}-{:02d}-{:02d}-{:02d}-{:02d}-{:02d}'.format(
datetimestamp.year, datetimestamp.month, datetimestamp.day,
datetimestamp.hour, datetimestamp.minute, datetimestamp.second
)
def str_to_time(time_str: str) -> datetime.datetime:
"""
Convert human readable string to datetime.datetime.
"""
pieces: Any = [int(piece) for piece in time_str.split('-')]
return datetime.datetime(*pieces)
class Trainer:
def __init__(self,
model: Model,
optimizer: torch.optim.Optimizer,
iterator: DataIterator,
train_dataset: Iterable[Instance],
validation_dataset: Optional[Iterable[Instance]] = None,
patience: Optional[int] = None,
validation_metric: str = "-loss",
validation_iterator: DataIterator = None,
num_epochs: int = 20,
serialization_dir: Optional[str] = None,
num_serialized_models_to_keep: int = 20,
keep_serialized_model_every_num_seconds: int = None,
model_save_interval: float = None,
cuda_device: Union[int, List] = -1,
grad_norm: Optional[float] = None,
grad_clipping: Optional[float] = None,
learning_rate_scheduler: Optional[LearningRateScheduler] = None,
summary_interval: int = 100,
histogram_interval: int = None) -> None:
"""
Parameters
----------
model : ``Model``, required.
An AllenNLP model to be optimized. Pytorch Modules can also be optimized if
their ``forward`` method returns a dictionary with a "loss" key, containing a
scalar tensor representing the loss function to be optimized.
optimizer : ``torch.nn.Optimizer``, required.
An instance of a Pytorch Optimizer, instantiated with the parameters of the
model to be optimized.
iterator : ``DataIterator``, required.
A method for iterating over a ``Dataset``, yielding padded indexed batches.
train_dataset : ``Dataset``, required.
A ``Dataset`` to train on. The dataset should have already been indexed.
validation_dataset : ``Dataset``, optional, (default = None).
A ``Dataset`` to evaluate on. The dataset should have already been indexed.
patience : Optional[int] > 0, optional (default=None)
Number of epochs to be patient before early stopping: the training is stopped
after ``patience`` epochs with no improvement. If given, it must be ``> 0``.
If None, early stopping is disabled.
validation_metric : str, optional (default="loss")
Validation metric to measure for whether to stop training using patience
and whether to serialize an ``is_best`` model each epoch. The metric name
must be prepended with either "+" or "-", which specifies whether the metric
is an increasing or decreasing function.
validation_iterator : ``DataIterator``, optional (default=None)
An iterator to use for the validation set. If ``None``, then
use the training `iterator`.
num_epochs : int, optional (default = 20)
Number of training epochs.
serialization_dir : str, optional (default=None)
Path to directory for saving and loading model files. Models will not be saved if
this parameter is not passed.
num_serialized_models_to_keep : ``int``, optional (default=20)
Number of previous model checkpoints to retain. Default is to keep 20 checkpoints.
A value of None or -1 means all checkpoints will be kept.
keep_serialized_model_every_num_seconds : ``int``, optional (default=None)
If num_serialized_models_to_keep is not None, then occasionally it's useful to
save models at a given interval in addition to the last num_serialized_models_to_keep.
To do so, specify keep_serialized_model_every_num_seconds as the number of seconds
between permanently saved checkpoints. Note that this option is only used if
num_serialized_models_to_keep is not None, otherwise all checkpoints are kept.
model_save_interval : ``float``, optional (default=None)
If provided, then serialize models every ``model_save_interval``
seconds within single epochs. In all cases, models are also saved
at the end of every epoch if ``serialization_dir`` is provided.
cuda_device : ``int``, optional (default = -1)
An integer specifying the CUDA device to use. If -1, the CPU is used.
grad_norm : ``float``, optional, (default = None).
If provided, gradient norms will be rescaled to have a maximum of this value.
grad_clipping : ``float``, optional (default = ``None``).
If provided, gradients will be clipped `during the backward pass` to have an (absolute)
maximum of this value. If you are getting ``NaNs`` in your gradients during training
that are not solved by using ``grad_norm``, you may need this.
learning_rate_scheduler : ``PytorchLRScheduler``, optional, (default = None)
A Pytorch learning rate scheduler. The learning rate will be decayed with respect to
this schedule at the end of each epoch. If you use
:class:`torch.optim.lr_scheduler.ReduceLROnPlateau`, this will use the ``validation_metric``
provided to determine if learning has plateaued. To support updating the learning
rate on every batch, this can optionally implement ``step_batch(batch_num_total)`` which
updates the learning rate given the batch number.
summary_interval: ``int``, optional, (default = 100)
Number of batches between logging scalars to tensorboard
histogram_interval : ``int``, optional, (default = ``None``)
If not None, then log histograms to tensorboard every ``histogram_interval`` batches.
When this parameter is specified, the following additional logging is enabled:
* Histograms of model parameters
* The ratio of parameter update norm to parameter norm
* Histogram of layer activations
We log histograms of the parameters returned by
``model.get_parameters_for_histogram_tensorboard_logging``.
The layer activations are logged for any modules in the ``Model`` that have
the attribute ``should_log_activations`` set to ``True``. Logging
histograms requires a number of GPU-CPU copies during training and is typically
slow, so we recommend logging histograms relatively infrequently.
Note: only Modules that return tensors, tuples of tensors or dicts
with tensors as values currently support activation logging.
"""
self._model = model
self._iterator = iterator
self._validation_iterator = validation_iterator
self._optimizer = optimizer
self._train_data = train_dataset
self._validation_data = validation_dataset
if patience is None: # no early stopping
if validation_dataset:
logger.warning('You provided a validation dataset but patience was set to None, '
'meaning that early stopping is disabled')
elif (not isinstance(patience, int)) or patience <= 0:
raise ConfigurationError('{} is an invalid value for "patience": it must be a positive integer '
'or None (if you want to disable early stopping)'.format(patience))
self._patience = patience
self._num_epochs = num_epochs
self._serialization_dir = serialization_dir
self._num_serialized_models_to_keep = num_serialized_models_to_keep
self._keep_serialized_model_every_num_seconds = keep_serialized_model_every_num_seconds
self._serialized_paths: List[Any] = []
self._last_permanent_saved_checkpoint_time = time.time()
self._model_save_interval = model_save_interval
self._grad_norm = grad_norm
self._grad_clipping = grad_clipping
self._learning_rate_scheduler = learning_rate_scheduler
increase_or_decrease = validation_metric[0]
if increase_or_decrease not in ["+", "-"]:
raise ConfigurationError("Validation metrics must specify whether they should increase "
"or decrease by pre-pending the metric name with a +/-.")
self._validation_metric = validation_metric[1:]
self._validation_metric_decreases = increase_or_decrease == "-"
if not isinstance(cuda_device, int) and not isinstance(cuda_device, list):
raise ConfigurationError("Expected an int or list for cuda_device, got {}".format(cuda_device))
if isinstance(cuda_device, list):
logger.info(f"WARNING: Multiple GPU support is experimental not recommended for use. "
"In some cases it may lead to incorrect results or undefined behavior.")
self._multiple_gpu = True
self._cuda_devices = cuda_device
# data_parallel will take care of transferring to cuda devices,
# so the iterator keeps data on CPU.
self._iterator_device = -1
else:
self._multiple_gpu = False
self._cuda_devices = [cuda_device]
self._iterator_device = cuda_device
if self._cuda_devices[0] != -1:
self._model = self._model.cuda(self._cuda_devices[0])
self._log_interval = 10 # seconds
self._summary_interval = summary_interval
self._histogram_interval = histogram_interval
self._log_histograms_this_batch = False
# We keep the total batch number as a class variable because it
# is used inside a closure for the hook which logs activations in
# ``_enable_activation_logging``.
self._batch_num_total = 0
self._last_log = 0.0 # time of last logging
if serialization_dir is not None:
train_log = SummaryWriter(os.path.join(serialization_dir, "log", "train"))
validation_log = SummaryWriter(os.path.join(serialization_dir, "log", "validation"))
self._tensorboard = TensorboardWriter(train_log, validation_log)
else:
self._tensorboard = TensorboardWriter()
self._warned_tqdm_ignores_underscores = False
def _enable_gradient_clipping(self) -> None:
if self._grad_clipping is not None:
# Pylint is unable to tell that we're in the case that _grad_clipping is not None...
# pylint: disable=invalid-unary-operand-type
clip_function = lambda grad: grad.clamp(-self._grad_clipping, self._grad_clipping)
for parameter in self._model.parameters():
if parameter.requires_grad:
parameter.register_hook(clip_function)
def _enable_activation_logging(self) -> None:
"""
Log activations to tensorboard
"""
if self._histogram_interval is not None:
# To log activation histograms to the forward pass, we register
# a hook on forward to capture the output tensors.
# This uses a closure on self._log_histograms_this_batch to
# determine whether to send the activations to tensorboard,
# since we don't want them on every call.
for _, module in self._model.named_modules():
if not getattr(module, 'should_log_activations', False):
# skip it
continue
def hook(module_, inputs, outputs):
# pylint: disable=unused-argument,cell-var-from-loop
log_prefix = 'activation_histogram/{0}'.format(module_.__class__)
if self._log_histograms_this_batch:
if isinstance(outputs, torch.Tensor):
log_name = log_prefix
self._tensorboard.add_train_histogram(log_name,
outputs,
self._batch_num_total)
elif isinstance(outputs, (list, tuple)):
for i, output in enumerate(outputs):
log_name = "{0}_{1}".format(log_prefix, i)
self._tensorboard.add_train_histogram(log_name,
output,
self._batch_num_total)
elif isinstance(outputs, dict):
for k, tensor in outputs.items():
log_name = "{0}_{1}".format(log_prefix, k)
self._tensorboard.add_train_histogram(log_name,
tensor,
self._batch_num_total)
else:
# skip it
pass
module.register_forward_hook(hook)
def _rescale_gradients(self) -> Optional[float]:
"""
Performs gradient rescaling. Is a no-op if gradient rescaling is not enabled.
"""
if self._grad_norm:
parameters_to_clip = [p for p in self._model.parameters()
if p.grad is not None]
return sparse_clip_norm(parameters_to_clip, self._grad_norm)
return None
def _data_parallel(self, batch):
"""
Do the forward pass using multiple GPUs. This is a simplification
of torch.nn.parallel.data_parallel to support the allennlp model
interface.
"""
inputs, module_kwargs = scatter_kwargs((), batch, self._cuda_devices, 0)
used_device_ids = self._cuda_devices[:len(inputs)]
replicas = replicate(self._model, used_device_ids)
outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)
# Only the 'loss' is needed.
# a (num_gpu, ) tensor with loss on each GPU
losses = gather([output['loss'].unsqueeze(0) for output in outputs], used_device_ids[0], 0)
return {'loss': losses.mean()}
def _batch_loss(self, batch: torch.Tensor, for_training: bool) -> torch.Tensor:
"""
Does a forward pass on the given batch and returns the ``loss`` value in the result.
If ``for_training`` is `True` also applies regularization penalty.
"""
if self._multiple_gpu:
output_dict = self._data_parallel(batch)
else:
output_dict = self._model(**batch)
try:
loss = output_dict["loss"]
if for_training:
loss += self._model.get_regularization_penalty()
except KeyError:
if for_training:
raise RuntimeError("The model you are trying to optimize does not contain a"
" 'loss' key in the output of model.forward(inputs).")
loss = None
return loss
def _get_metrics(self, total_loss: float, num_batches: int, reset: bool = False) -> Dict[str, float]:
"""
Gets the metrics but sets ``"loss"`` to
the total loss divided by the ``num_batches`` so that
the ``"loss"`` metric is "average loss per batch".
"""
metrics = self._model.get_metrics(reset=reset)
metrics["loss"] = float(total_loss / num_batches) if num_batches > 0 else 0.0
return metrics
def _train_epoch(self, epoch: int) -> Dict[str, float]:
"""
Trains one epoch and returns metrics.
"""
logger.info("Epoch %d/%d", epoch, self._num_epochs - 1)
logger.info(f"Peak CPU memory usage MB: {peak_memory_mb()}")
for gpu, memory in gpu_memory_mb().items():
logger.info(f"GPU {gpu} memory usage MB: {memory}")
train_loss = 0.0
# Set the model to "train" mode.
self._model.train()
# Get tqdm for the training batches
train_generator = self._iterator(self._train_data,
num_epochs=1,
cuda_device=self._iterator_device)
num_training_batches = self._iterator.get_num_batches(self._train_data)
train_generator_tqdm = Tqdm.tqdm(train_generator,
total=num_training_batches)
self._last_log = time.time()
last_save_time = time.time()
batches_this_epoch = 0
if self._batch_num_total is None:
self._batch_num_total = 0
if self._histogram_interval is not None:
histogram_parameters = set(self._model.get_parameters_for_histogram_tensorboard_logging())
logger.info("Training")
for batch in train_generator_tqdm:
batches_this_epoch += 1
self._batch_num_total += 1
batch_num_total = self._batch_num_total
self._log_histograms_this_batch = self._histogram_interval is not None and (
batch_num_total % self._histogram_interval == 0)
self._optimizer.zero_grad()
loss = self._batch_loss(batch, for_training=True)
loss.backward()
train_loss += loss.item()
batch_grad_norm = self._rescale_gradients()
# This does nothing if batch_num_total is None or you are using an
# LRScheduler which doesn't update per batch.
if self._learning_rate_scheduler:
self._learning_rate_scheduler.step_batch(batch_num_total)
if self._log_histograms_this_batch:
# get the magnitude of parameter updates for logging
# We need a copy of current parameters to compute magnitude of updates,
# and copy them to CPU so large models won't go OOM on the GPU.
param_updates = {name: param.detach().cpu().clone()
for name, param in self._model.named_parameters()}
self._optimizer.step()
for name, param in self._model.named_parameters():
param_updates[name].sub_(param.detach().cpu())
update_norm = torch.norm(param_updates[name].view(-1, ))
param_norm = torch.norm(param.view(-1, ))
self._tensorboard.add_train_scalar("gradient_update/" + name,
update_norm / (param_norm + 1e-7),
batch_num_total)
else:
self._optimizer.step()
# Update the description with the latest metrics
metrics = self._get_metrics(train_loss, batches_this_epoch)
description = self._description_from_metrics(metrics)
train_generator_tqdm.set_description(description, refresh=False)
# Log parameter values to Tensorboard
if batch_num_total % self._summary_interval == 0:
self._parameter_and_gradient_statistics_to_tensorboard(batch_num_total, batch_grad_norm)
self._tensorboard.add_train_scalar("loss/loss_train", metrics["loss"], batch_num_total)
self._metrics_to_tensorboard(batch_num_total,
{"epoch_metrics/" + k: v for k, v in metrics.items()})
if self._log_histograms_this_batch:
self._histograms_to_tensorboard(batch_num_total, histogram_parameters)
# Save model if needed.
if self._model_save_interval is not None and (
time.time() - last_save_time > self._model_save_interval
):
last_save_time = time.time()
self._save_checkpoint(
'{0}.{1}'.format(epoch, time_to_str(int(last_save_time))), [], is_best=False
)
return self._get_metrics(train_loss, batches_this_epoch, reset=True)
def _should_stop_early(self, metric_history: List[float]) -> bool:
"""
uses patience and the validation metric to determine if training should stop early
"""
if self._patience and self._patience < len(metric_history):
# Pylint can't figure out that in this branch `self._patience` is an int.
# pylint: disable=invalid-unary-operand-type
# Is the best score in the past N epochs worse than or equal the best score overall?
if self._validation_metric_decreases:
return min(metric_history[-self._patience:]) >= min(metric_history[:-self._patience])
else:
return max(metric_history[-self._patience:]) <= max(metric_history[:-self._patience])
return False
def _parameter_and_gradient_statistics_to_tensorboard(self, # pylint: disable=invalid-name
epoch: int,
batch_grad_norm: float) -> None:
"""
Send the mean and std of all parameters and gradients to tensorboard, as well
as logging the average gradient norm.
"""
# Log parameter values to Tensorboard
for name, param in self._model.named_parameters():
self._tensorboard.add_train_scalar("parameter_mean/" + name,
param.data.mean(),
epoch)
self._tensorboard.add_train_scalar("parameter_std/" + name, param.data.std(), epoch)
if param.grad is not None:
if is_sparse(param.grad):
# pylint: disable=protected-access
grad_data = param.grad.data._values()
else:
grad_data = param.grad.data
# skip empty gradients
if torch.prod(torch.tensor(grad_data.shape)).item() > 0: # pylint: disable=not-callable
self._tensorboard.add_train_scalar("gradient_mean/" + name,
grad_data.mean(),
epoch)
self._tensorboard.add_train_scalar("gradient_std/" + name,
grad_data.std(),
epoch)
else:
# no gradient for a parameter with sparse gradients
logger.info("No gradient for %s, skipping tensorboard logging.", name)
# norm of gradients
if batch_grad_norm is not None:
self._tensorboard.add_train_scalar("gradient_norm",
batch_grad_norm,
epoch)
def _histograms_to_tensorboard(self, epoch: int, histogram_parameters: Set[str]) -> None:
"""
Send histograms of parameters to tensorboard.
"""
for name, param in self._model.named_parameters():
if name in histogram_parameters:
self._tensorboard.add_train_histogram("parameter_histogram/" + name,
param,
epoch)
def _metrics_to_tensorboard(self,
epoch: int,
train_metrics: dict,
val_metrics: dict = None) -> None:
"""
Sends all of the train metrics (and validation metrics, if provided) to tensorboard.
"""
metric_names = set(train_metrics.keys())
if val_metrics is not None:
metric_names.update(val_metrics.keys())
val_metrics = val_metrics or {}
for name in metric_names:
train_metric = train_metrics.get(name)
if train_metric is not None:
self._tensorboard.add_train_scalar(name, train_metric, epoch)
val_metric = val_metrics.get(name)
if val_metric is not None:
self._tensorboard.add_validation_scalar(name, val_metric, epoch)
def _metrics_to_console(self, # pylint: disable=no-self-use
train_metrics: dict,
val_metrics: dict = None) -> None:
"""
Logs all of the train metrics (and validation metrics, if provided) to the console.
"""
val_metrics = val_metrics or {}
dual_message_template = "Training %s : %3f Validation %s : %3f "
message_template = "%s %s : %3f "
metric_names = set(train_metrics.keys())
if val_metrics:
metric_names.update(val_metrics.keys())
for name in metric_names:
train_metric = train_metrics.get(name)
val_metric = val_metrics.get(name)
if val_metric is not None and train_metric is not None:
logger.info(dual_message_template, name, train_metric, name, val_metric)
elif val_metric is not None:
logger.info(message_template, "Validation", name, val_metric)
elif train_metric is not None:
logger.info(message_template, "Training", name, train_metric)
def _validation_loss(self) -> Tuple[float, int]:
"""
Computes the validation loss. Returns it and the number of batches.
"""
logger.info("Validating")
self._model.eval()
if self._validation_iterator is not None:
val_iterator = self._validation_iterator
else:
val_iterator = self._iterator
val_generator = val_iterator(self._validation_data,
num_epochs=1,
cuda_device=self._iterator_device)
num_validation_batches = val_iterator.get_num_batches(self._validation_data)
val_generator_tqdm = Tqdm.tqdm(val_generator,
total=num_validation_batches)
batches_this_epoch = 0
val_loss = 0
for batch in val_generator_tqdm:
loss = self._batch_loss(batch, for_training=False)
if loss is not None:
# You shouldn't necessarily have to compute a loss for validation, so we allow for
# `loss` to be None. We need to be careful, though - `batches_this_epoch` is
# currently only used as the divisor for the loss function, so we can safely only
# count those batches for which we actually have a loss. If this variable ever
# gets used for something else, we might need to change things around a bit.
batches_this_epoch += 1
val_loss += loss.detach().cpu().numpy()
# Update the description with the latest metrics
val_metrics = self._get_metrics(val_loss, batches_this_epoch)
description = self._description_from_metrics(val_metrics)
val_generator_tqdm.set_description(description, refresh=False)
return val_loss, batches_this_epoch
def train(self) -> Dict[str, Any]:
"""
Trains the supplied model with the supplied parameters.
"""
try:
epoch_counter, validation_metric_per_epoch = self._restore_checkpoint()
except RuntimeError:
traceback.print_exc()
raise ConfigurationError("Could not recover training from the checkpoint. Did you mean to output to "
"a different serialization directory or delete the existing serialization "
"directory?")
self._enable_gradient_clipping()
self._enable_activation_logging()
logger.info("Beginning training.")
train_metrics: Dict[str, float] = {}
val_metrics: Dict[str, float] = {}
epochs_trained = 0
training_start_time = time.time()
for epoch in range(epoch_counter, self._num_epochs):
epoch_start_time = time.time()
train_metrics = self._train_epoch(epoch)
if self._validation_data is not None:
with torch.no_grad():
# We have a validation set, so compute all the metrics on it.
val_loss, num_batches = self._validation_loss()
val_metrics = self._get_metrics(val_loss, num_batches, reset=True)
# Check validation metric for early stopping
this_epoch_val_metric = val_metrics[self._validation_metric]
# Check validation metric to see if it's the best so far
is_best_so_far = self._is_best_so_far(this_epoch_val_metric, validation_metric_per_epoch)
validation_metric_per_epoch.append(this_epoch_val_metric)
if self._should_stop_early(validation_metric_per_epoch):
logger.info("Ran out of patience. Stopping training.")
break
else:
# No validation set, so just assume it's the best so far.
is_best_so_far = True
val_metrics = {}
this_epoch_val_metric = None
self._save_checkpoint(epoch, validation_metric_per_epoch, is_best=is_best_so_far)
self._metrics_to_tensorboard(epoch, train_metrics, val_metrics=val_metrics)
self._metrics_to_console(train_metrics, val_metrics)
if self._learning_rate_scheduler:
# The LRScheduler API is agnostic to whether your schedule requires a validation metric -
# if it doesn't, the validation metric passed here is ignored.
self._learning_rate_scheduler.step(this_epoch_val_metric, epoch)
epoch_elapsed_time = time.time() - epoch_start_time
logger.info("Epoch duration: %s", time.strftime("%H:%M:%S", time.gmtime(epoch_elapsed_time)))
if epoch < self._num_epochs - 1:
training_elapsed_time = time.time() - training_start_time
estimated_time_remaining = training_elapsed_time * \
((self._num_epochs - epoch_counter) / float(epoch - epoch_counter + 1) - 1)
formatted_time = time.strftime("%H:%M:%S", time.gmtime(estimated_time_remaining))
logger.info("Estimated training time remaining: %s", formatted_time)
epochs_trained += 1
training_elapsed_time = time.time() - training_start_time
metrics = {
"training_duration": time.strftime("%H:%M:%S", time.gmtime(training_elapsed_time)),
"training_start_epoch": epoch_counter,
"training_epochs": epochs_trained
}
for key, value in train_metrics.items():
metrics["training_" + key] = value
for key, value in val_metrics.items():
metrics["validation_" + key] = value
if validation_metric_per_epoch:
# We may not have had validation data, so we need to hide this behind an if.
if self._validation_metric_decreases:
best_validation_metric = min(validation_metric_per_epoch)
else:
best_validation_metric = max(validation_metric_per_epoch)
metrics[f"best_validation_{self._validation_metric}"] = best_validation_metric
metrics['best_epoch'] = [i for i, value in enumerate(validation_metric_per_epoch)
if value == best_validation_metric][-1]
return metrics
def _is_best_so_far(self,
this_epoch_val_metric: float,
validation_metric_per_epoch: List[float]):
if not validation_metric_per_epoch:
return True
elif self._validation_metric_decreases:
return this_epoch_val_metric < min(validation_metric_per_epoch)
else:
return this_epoch_val_metric > max(validation_metric_per_epoch)
def _description_from_metrics(self, metrics: Dict[str, float]) -> str:
if (not self._warned_tqdm_ignores_underscores and
any(metric_name.startswith("_") for metric_name in metrics)):
logger.warning("Metrics with names beginning with \"_\" will "
"not be logged to the tqdm progress bar.")
self._warned_tqdm_ignores_underscores = True
return ', '.join(["%s: %.4f" % (name, value) for name, value in
metrics.items() if not name.startswith("_")]) + " ||"
def _save_checkpoint(self,
epoch: Union[int, str],
val_metric_per_epoch: List[float],
is_best: Optional[bool] = None) -> None:
"""
Saves a checkpoint of the model to self._serialization_dir.
Is a no-op if self._serialization_dir is None.
Parameters
----------
epoch : Union[int, str], required.
The epoch of training. If the checkpoint is saved in the middle
of an epoch, the parameter is a string with the epoch and timestamp.
is_best: bool, optional (default = None)
A flag which causes the model weights at the given epoch to
be copied to a "best.th" file. The value of this flag should
be based on some validation metric computed by your model.
"""
if self._serialization_dir is not None:
model_path = os.path.join(self._serialization_dir, "model_state_epoch_{}.th".format(epoch))
model_state = self._model.state_dict()
torch.save(model_state, model_path)
training_state = {'epoch': epoch,
'val_metric_per_epoch': val_metric_per_epoch,
'optimizer': self._optimizer.state_dict(),
'batch_num_total': self._batch_num_total}
training_path = os.path.join(self._serialization_dir,
"training_state_epoch_{}.th".format(epoch))
torch.save(training_state, training_path)
if is_best:
logger.info("Best validation performance so far. "
"Copying weights to '%s/best.th'.", self._serialization_dir)
shutil.copyfile(model_path, os.path.join(self._serialization_dir, "best.th"))
if self._num_serialized_models_to_keep and self._num_serialized_models_to_keep >= 0:
self._serialized_paths.append([time.time(), model_path, training_path])
if len(self._serialized_paths) > self._num_serialized_models_to_keep:
paths_to_remove = self._serialized_paths.pop(0)
# Check to see if we should keep this checkpoint, if it has been longer
# than self._keep_serialized_model_every_num_seconds since the last
# kept checkpoint.
remove_path = True
if self._keep_serialized_model_every_num_seconds is not None:
save_time = paths_to_remove[0]
time_since_checkpoint_kept = save_time - self._last_permanent_saved_checkpoint_time
if time_since_checkpoint_kept > self._keep_serialized_model_every_num_seconds:
# We want to keep this checkpoint.
remove_path = False
self._last_permanent_saved_checkpoint_time = save_time
if remove_path:
for fname in paths_to_remove[1:]:
os.remove(fname)
def find_latest_checkpoint(self) -> Tuple[str, str]:
"""
Return the location of the latest model and training state files.
If there isn't a valid checkpoint then return None.
"""
have_checkpoint = (self._serialization_dir is not None and
any("model_state_epoch_" in x for x in os.listdir(self._serialization_dir)))
if not have_checkpoint:
return None
serialization_files = os.listdir(self._serialization_dir)
model_checkpoints = [x for x in serialization_files if "model_state_epoch" in x]
# Get the last checkpoint file. Epochs are specified as either an
# int (for end of epoch files) or with epoch and timestamp for
# within epoch checkpoints, e.g. 5.2018-02-02-15-33-42
found_epochs = [
# pylint: disable=anomalous-backslash-in-string
re.search("model_state_epoch_([0-9\.\-]+)\.th", x).group(1)
for x in model_checkpoints
]
int_epochs: Any = []
for epoch in found_epochs:
pieces = epoch.split('.')
if len(pieces) == 1:
# Just a single epoch without timestamp
int_epochs.append([int(pieces[0]), 0])
else:
# has a timestamp
int_epochs.append([int(pieces[0]), pieces[1]])
last_epoch = sorted(int_epochs, reverse=True)[0]
if last_epoch[1] == 0:
epoch_to_load = str(last_epoch[0])
else:
epoch_to_load = '{0}.{1}'.format(last_epoch[0], last_epoch[1])
model_path = os.path.join(self._serialization_dir,
"model_state_epoch_{}.th".format(epoch_to_load))
training_state_path = os.path.join(self._serialization_dir,
"training_state_epoch_{}.th".format(epoch_to_load))
return (model_path, training_state_path)
def _restore_checkpoint(self) -> Tuple[int, List[float]]:
"""
Restores a model from a serialization_dir to the last saved checkpoint.
This includes an epoch count and optimizer state, which is serialized separately
from model parameters. This function should only be used to continue training -
if you wish to load a model for inference/load parts of a model into a new
computation graph, you should use the native Pytorch functions:
`` model.load_state_dict(torch.load("/path/to/model/weights.th"))``
If ``self._serialization_dir`` does not exist or does not contain any checkpointed weights,
this function will do nothing and return 0.
Returns
-------
epoch: int
The epoch at which to resume training, which should be one after the epoch
in the saved training state.
"""
latest_checkpoint = self.find_latest_checkpoint()
if latest_checkpoint is None:
# No checkpoint to restore, start at 0
return 0, []
model_path, training_state_path = latest_checkpoint
# Load the parameters onto CPU, then transfer to GPU.
# This avoids potential OOM on GPU for large models that
# load parameters onto GPU then make a new GPU copy into the parameter
# buffer. The GPU transfer happens implicitly in load_state_dict.
model_state = torch.load(model_path, map_location=util.device_mapping(-1))
training_state = torch.load(training_state_path, map_location=util.device_mapping(-1))
self._model.load_state_dict(model_state)
self._optimizer.load_state_dict(training_state["optimizer"])
move_optimizer_to_cuda(self._optimizer)
# We didn't previously save `validation_metric_per_epoch`, so we can't assume
# that it's part of the trainer state. If it's not there, an empty list is all
# we can do.
if "val_metric_per_epoch" not in training_state:
logger.warning("trainer state `val_metric_per_epoch` not found, using empty list")
val_metric_per_epoch: List[float] = []
else:
val_metric_per_epoch = training_state["val_metric_per_epoch"]
if isinstance(training_state["epoch"], int):
epoch_to_return = training_state["epoch"] + 1
else:
epoch_to_return = int(training_state["epoch"].split('.')[0]) + 1
# For older checkpoints with batch_num_total missing, default to old behavior where
# it is unchanged.
batch_num_total = training_state.get('batch_num_total')
if batch_num_total is not None:
self._batch_num_total = batch_num_total
return epoch_to_return, val_metric_per_epoch
# Requires custom from_params.
@classmethod
def from_params(cls,
model: Model,
serialization_dir: str,
iterator: DataIterator,
train_data: Iterable[Instance],
validation_data: Optional[Iterable[Instance]],
params: Params,
validation_iterator: DataIterator = None) -> 'Trainer':
patience = params.pop_int("patience", None)
validation_metric = params.pop("validation_metric", "-loss")
num_epochs = params.pop_int("num_epochs", 20)
cuda_device = params.pop_int("cuda_device", -1)
grad_norm = params.pop_float("grad_norm", None)
grad_clipping = params.pop_float("grad_clipping", None)
lr_scheduler_params = params.pop("learning_rate_scheduler", None)
if cuda_device >= 0:
model = model.cuda(cuda_device)
parameters = [[n, p] for n, p in model.named_parameters() if p.requires_grad]
optimizer = Optimizer.from_params(parameters, params.pop("optimizer"))
if lr_scheduler_params:
scheduler = LearningRateScheduler.from_params(optimizer, lr_scheduler_params)
else:
scheduler = None
num_serialized_models_to_keep = params.pop_int("num_serialized_models_to_keep", 20)
keep_serialized_model_every_num_seconds = params.pop_int(
"keep_serialized_model_every_num_seconds", None)
model_save_interval = params.pop_float("model_save_interval", None)
summary_interval = params.pop_int("summary_interval", 100)
histogram_interval = params.pop_int("histogram_interval", None)
params.assert_empty(cls.__name__)
return Trainer(model, optimizer, iterator,
train_data, validation_data,
patience=patience,
validation_metric=validation_metric,
validation_iterator=validation_iterator,
num_epochs=num_epochs,
serialization_dir=serialization_dir,
cuda_device=cuda_device,
grad_norm=grad_norm,
grad_clipping=grad_clipping,
learning_rate_scheduler=scheduler,
num_serialized_models_to_keep=num_serialized_models_to_keep,
keep_serialized_model_every_num_seconds=keep_serialized_model_every_num_seconds,
model_save_interval=model_save_interval,
summary_interval=summary_interval,
histogram_interval=histogram_interval)
| [
"torch.nn.parallel.replicate",
"torch.no_grad",
"torch.save",
"torch.nn.parallel.scatter_gather.scatter_kwargs",
"torch.tensor",
"torch.nn.parallel.parallel_apply"
] | 0.4.0 | albert-dot-ai/allennlp | 580dc8b0e2c6491d4d75b54c3b15b34b462e0c67 |
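A small sketch of the sparse-gradient case that sparse_clip_norm() above exists for: an nn.Embedding built with sparse=True produces sparse .grad tensors, so the per-parameter norm is taken over the coalesced values instead of the raw gradient. Shapes and values are arbitrary.

import torch
import torch.nn as nn

emb = nn.Embedding(100, 16, sparse=True)
head = nn.Linear(16, 1)
loss = head(emb(torch.tensor([1, 5, 5, 42]))).sum()
loss.backward()
print(emb.weight.grad.is_sparse)   # True -> needs the coalesce() branch above

total_norm = 0.0
for p in [emb.weight, head.weight, head.bias]:
    grad = p.grad.coalesce().values() if p.grad.is_sparse else p.grad
    total_norm += grad.norm(2) ** 2
print(float(total_norm ** 0.5))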
1.9 | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import math
from typing import List, Tuple, Optional
import fastmri
import torch
import torch.nn as nn
import torch.nn.functional as F
from fastmri.data import transforms
from .unet import Unet
class NormUnet(nn.Module):
"""
Normalized U-Net model.
This is the same as a regular U-Net, but with normalization applied to the
input before the U-Net. This keeps the values more numerically stable
during training.
"""
def __init__(
self,
chans: int,
num_pools: int,
in_chans: int = 2,
out_chans: int = 2,
drop_prob: float = 0.0,
):
"""
Args:
chans: Number of output channels of the first convolution layer.
num_pools: Number of down-sampling and up-sampling layers.
in_chans: Number of channels in the input to the U-Net model.
out_chans: Number of channels in the output to the U-Net model.
drop_prob: Dropout probability.
"""
super().__init__()
self.unet = Unet(
in_chans=in_chans,
out_chans=out_chans,
chans=chans,
num_pool_layers=num_pools,
drop_prob=drop_prob,
)
def complex_to_chan_dim(self, x: torch.Tensor) -> torch.Tensor:
b, c, h, w, two = x.shape
assert two == 2
return x.permute(0, 4, 1, 2, 3).reshape(b, 2 * c, h, w)
def chan_complex_to_last_dim(self, x: torch.Tensor) -> torch.Tensor:
b, c2, h, w = x.shape
assert c2 % 2 == 0
c = c2 // 2
return x.view(b, 2, c, h, w).permute(0, 2, 3, 4, 1).contiguous()
def norm(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
# group norm
b, c, h, w = x.shape
x = x.view(b, 2, c // 2 * h * w)
mean = x.mean(dim=2).view(b, 2, 1, 1)
std = x.std(dim=2).view(b, 2, 1, 1)
x = x.view(b, c, h, w)
return (x - mean) / std, mean, std
def unnorm(
self, x: torch.Tensor, mean: torch.Tensor, std: torch.Tensor
) -> torch.Tensor:
return x * std + mean
def pad(
self, x: torch.Tensor
) -> Tuple[torch.Tensor, Tuple[List[int], List[int], int, int]]:
_, _, h, w = x.shape
w_mult = ((w - 1) | 15) + 1
h_mult = ((h - 1) | 15) + 1
w_pad = [math.floor((w_mult - w) / 2), math.ceil((w_mult - w) / 2)]
h_pad = [math.floor((h_mult - h) / 2), math.ceil((h_mult - h) / 2)]
# TODO: fix this type when PyTorch fixes theirs
# the documentation lies - this actually takes a list
# https://github.com/pytorch/pytorch/blob/master/torch/nn/functional.py#L3457
# https://github.com/pytorch/pytorch/pull/16949
x = F.pad(x, w_pad + h_pad)
return x, (h_pad, w_pad, h_mult, w_mult)
def unpad(
self,
x: torch.Tensor,
h_pad: List[int],
w_pad: List[int],
h_mult: int,
w_mult: int,
) -> torch.Tensor:
return x[..., h_pad[0] : h_mult - h_pad[1], w_pad[0] : w_mult - w_pad[1]]
def forward(self, x: torch.Tensor) -> torch.Tensor:
if not x.shape[-1] == 2:
raise ValueError("Last dimension must be 2 for complex.")
# get shapes for unet and normalize
x = self.complex_to_chan_dim(x)
x, mean, std = self.norm(x)
x, pad_sizes = self.pad(x)
x = self.unet(x)
# get shapes back and unnormalize
x = self.unpad(x, *pad_sizes)
x = self.unnorm(x, mean, std)
x = self.chan_complex_to_last_dim(x)
return x
class SensitivityModel(nn.Module):
"""
Model for learning sensitivity estimation from k-space data.
This model applies an IFFT to multichannel k-space data and then a U-Net
to the coil images to estimate coil sensitivities. It can be used with the
end-to-end variational network.
"""
def __init__(
self,
chans: int,
num_pools: int,
in_chans: int = 2,
out_chans: int = 2,
drop_prob: float = 0.0,
mask_center: bool = True,
):
"""
Args:
chans: Number of output channels of the first convolution layer.
num_pools: Number of down-sampling and up-sampling layers.
in_chans: Number of channels in the input to the U-Net model.
out_chans: Number of channels in the output to the U-Net model.
drop_prob: Dropout probability.
mask_center: Whether to mask center of k-space for sensitivity map
calculation.
"""
super().__init__()
self.mask_center = mask_center
self.norm_unet = NormUnet(
chans,
num_pools,
in_chans=in_chans,
out_chans=out_chans,
drop_prob=drop_prob,
)
def chans_to_batch_dim(self, x: torch.Tensor) -> Tuple[torch.Tensor, int]:
b, c, h, w, comp = x.shape
return x.view(b * c, 1, h, w, comp), b
def batch_chans_to_chan_dim(self, x: torch.Tensor, batch_size: int) -> torch.Tensor:
bc, _, h, w, comp = x.shape
c = bc // batch_size
return x.view(batch_size, c, h, w, comp)
def divide_root_sum_of_squares(self, x: torch.Tensor) -> torch.Tensor:
return x / fastmri.rss_complex(x, dim=1).unsqueeze(-1).unsqueeze(1)
def get_pad_and_num_low_freqs(
self, mask: torch.Tensor, num_low_frequencies: Optional[int] = None
) -> Tuple[torch.Tensor, torch.Tensor]:
if num_low_frequencies is None:
# get low frequency line locations and mask them out
squeezed_mask = mask[:, 0, 0, :, 0].to(torch.int8)
cent = squeezed_mask.shape[1] // 2
# running argmin returns the first non-zero
left = torch.argmin(squeezed_mask[:, :cent].flip(1), dim=1)
right = torch.argmin(squeezed_mask[:, cent:], dim=1)
num_low_frequencies_tensor = torch.max(
2 * torch.min(left, right), torch.ones_like(left)
) # force a symmetric center unless 1
else:
num_low_frequencies_tensor = num_low_frequencies * torch.ones(
mask.shape[0], dtype=mask.dtype, device=mask.device
)
pad = (mask.shape[-2] - num_low_frequencies_tensor + 1) // 2
return pad, num_low_frequencies_tensor
def forward(
self,
masked_kspace: torch.Tensor,
mask: torch.Tensor,
num_low_frequencies: Optional[int] = None,
) -> torch.Tensor:
if self.mask_center:
pad, num_low_freqs = self.get_pad_and_num_low_freqs(
mask, num_low_frequencies
)
masked_kspace = transforms.batched_mask_center(
masked_kspace, pad, pad + num_low_freqs
)
# convert to image space
images, batches = self.chans_to_batch_dim(fastmri.ifft2c(masked_kspace))
# estimate sensitivities
return self.divide_root_sum_of_squares(
self.batch_chans_to_chan_dim(self.norm_unet(images), batches)
)
class VarNet(nn.Module):
"""
A full variational network model.
This model applies a combination of soft data consistency with a U-Net
regularizer. To use non-U-Net regularizers, use VarNetBlock.
"""
def __init__(
self,
num_cascades: int = 12,
sens_chans: int = 8,
sens_pools: int = 4,
chans: int = 18,
pools: int = 4,
mask_center: bool = True,
):
"""
Args:
num_cascades: Number of cascades (i.e., layers) for variational
network.
sens_chans: Number of channels for sensitivity map U-Net.
sens_pools: Number of downsampling and upsampling layers for
sensitivity map U-Net.
chans: Number of channels for cascade U-Net.
pools: Number of downsampling and upsampling layers for cascade
U-Net.
mask_center: Whether to mask center of k-space for sensitivity map
calculation.
"""
super().__init__()
self.sens_net = SensitivityModel(
chans=sens_chans,
num_pools=sens_pools,
mask_center=mask_center,
)
self.cascades = nn.ModuleList(
[VarNetBlock(NormUnet(chans, pools)) for _ in range(num_cascades)]
)
def forward(
self,
masked_kspace: torch.Tensor,
mask: torch.Tensor,
num_low_frequencies: Optional[int] = None,
) -> torch.Tensor:
sens_maps = self.sens_net(masked_kspace, mask, num_low_frequencies)
kspace_pred = masked_kspace.clone()
for cascade in self.cascades:
kspace_pred = cascade(kspace_pred, masked_kspace, mask, sens_maps)
return fastmri.rss(fastmri.complex_abs(fastmri.ifft2c(kspace_pred)), dim=1)
class VarNetBlock(nn.Module):
"""
Model block for end-to-end variational network.
This model applies a combination of soft data consistency with the input
model as a regularizer. A series of these blocks can be stacked to form
the full variational network.
"""
def __init__(self, model: nn.Module):
"""
Args:
model: Module for "regularization" component of variational
network.
"""
super().__init__()
self.model = model
self.dc_weight = nn.Parameter(torch.ones(1))
def sens_expand(self, x: torch.Tensor, sens_maps: torch.Tensor) -> torch.Tensor:
return fastmri.fft2c(fastmri.complex_mul(x, sens_maps))
def sens_reduce(self, x: torch.Tensor, sens_maps: torch.Tensor) -> torch.Tensor:
x = fastmri.ifft2c(x)
return fastmri.complex_mul(x, fastmri.complex_conj(sens_maps)).sum(
dim=1, keepdim=True
)
def forward(
self,
current_kspace: torch.Tensor,
ref_kspace: torch.Tensor,
mask: torch.Tensor,
sens_maps: torch.Tensor,
) -> torch.Tensor:
zero = torch.zeros(1, 1, 1, 1, 1).to(current_kspace)
soft_dc = torch.where(mask, current_kspace - ref_kspace, zero) * self.dc_weight
model_term = self.sens_expand(
self.model(self.sens_reduce(current_kspace, sens_maps)), sens_maps
)
return current_kspace - soft_dc - model_term
| [
"torch.zeros",
"torch.min",
"torch.argmin",
"torch.ones",
"torch.ones_like",
"torch.nn.functional.pad",
"torch.where"
] | 1.9.0 | vigsivan/fastMRI | 0f6c4c0176ff74bf2761d20ec62facb01c9038f8 |
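A shape-level sketch of a forward pass through the VarNet above, assuming the classes are importable and the fastmri package is available. The k-space size, mask pattern, cascade count, and channel counts are arbitrary toy values; multicoil k-space is stored as a real tensor with a trailing dimension of 2, and the boolean mask broadcasts over batch, coils, and rows.

import torch

batch, coils, height, width = 1, 4, 64, 64
masked_kspace = torch.randn(batch, coils, height, width, 2)
mask = torch.zeros(batch, 1, 1, width, 1, dtype=torch.bool)
mask[..., ::4, :] = True                                   # keep every 4th k-space column
mask[..., width // 2 - 4: width // 2 + 4, :] = True        # fully sampled center lines

model = VarNet(num_cascades=2, sens_chans=4, sens_pools=2, chans=8, pools=2)
with torch.no_grad():
    output = model(masked_kspace, mask, num_low_frequencies=8)
print(output.shape)   # expected: torch.Size([batch, height, width]) after the final RSS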
1.13 | import csv
import decimal
import os
import threading
import time
from typing import List
import torch
import torch.distributed as dist
import torch.distributed.rpc as rpc
import torch.multiprocessing as mp
from torch.distributed import rpc
from .trpc_server import TRPCCOMMServicer
from ..base_com_manager import BaseCommunicationManager
from ..message import Message
from ..observer import Observer
import logging
lock = threading.Lock()
WORKER = "worker{}"
class TRPCCommManager(BaseCommunicationManager):
def __init__(
self,
trpc_master_config_path,
process_id=0,
world_size=0,
):
logging.info("using TRPC backend")
with open(trpc_master_config_path, newline="") as csv_file:
csv_reader = csv.reader(csv_file)
# skip header line
next(csv_reader)
master_address, master_port = next(csv_reader)
self.master_address = master_address
self.master_port = master_port
self.process_id = process_id
self.world_size = world_size
self._observers: List[Observer] = []
if process_id == 0:
self.node_type = "server"
else:
self.node_type = "client"
print(f"Worker rank {process_id} initializing RPC")
self.trpc_servicer = TRPCCOMMServicer(
master_address, master_port, self.world_size, process_id
)
logging.info(os.getcwd())
os.environ["MASTER_ADDR"] = self.master_address
os.environ["MASTER_PORT"] = self.master_port
self._init_torch_rpc_tp(
master_address, master_port, process_id, self.world_size
)
self.is_running = True
print("server started. master address: " + str(master_address))
def _init_torch_rpc_pg(
self,
master_addr,
master_port,
worker_idx,
worker_num,
):
# https://github.com/pytorch/pytorch/issues/55615
# [BC-Breaking][RFC] Retire ProcessGroup Backend for RPC #55615
str_init_method = "tcp://" + str(master_addr) + ":" + str(master_port)
logging.info("str_init_method = {}".format(str_init_method))
options = rpc.ProcessGroupRpcBackendOptions(
num_send_recv_threads=4, init_method=str_init_method, rpc_timeout=60.0
)
rpc.init_rpc(
WORKER.format(worker_idx),
backend=dist.rpc.BackendType.PROCESS_GROUP,
rank=worker_idx,
world_size=worker_num,
rpc_backend_options=options,
)
# torch.distributed.rpc.init_rpc('worker', rank=self.global_rank, world_size=self.world_size)
logging.info("_init_rpc_with_process_group finished.")
def _init_torch_rpc_tp(
self,
master_addr,
master_port,
worker_idx,
worker_num,
):
# https://github.com/pytorch/pytorch/issues/55615
# [BC-Breaking][RFC] Retire ProcessGroup Backend for RPC #55615
str_init_method = "tcp://" + str(master_addr) + ":10000"
logging.info("str_init_method = {}".format(str_init_method))
options = rpc.TensorPipeRpcBackendOptions(
num_worker_threads=16,
rpc_timeout=1800,
init_method=str_init_method,
_transports=["uv"],
)
rpc.init_rpc(
WORKER.format(worker_idx),
backend=rpc.BackendType.TENSORPIPE,
rank=worker_idx,
world_size=worker_num,
rpc_backend_options=options,
)
logging.info("_init_torch_rpc_tp finished.")
def send_message(self, msg: Message):
receiver_id = msg.get_receiver_id()
logging.info("sending message to {}".format(receiver_id))
# Should I wait?
rpc.rpc_sync(
WORKER.format(receiver_id),
TRPCCOMMServicer.sendMessage,
args=(self.process_id, msg),
)
logging.debug("sent")
def add_observer(self, observer: Observer):
self._observers.append(observer)
def remove_observer(self, observer: Observer):
self._observers.remove(observer)
def handle_receive_message(self):
thread = threading.Thread(target=self.message_handling_subroutine)
thread.start()
def message_handling_subroutine(self):
while self.is_running:
if self.trpc_servicer.message_q.qsize() > 0:
lock.acquire()
msg = self.trpc_servicer.message_q.get()
self.notify(msg)
lock.release()
return
def stop_receive_message(self):
rpc.shutdown()
self.is_running = False
def notify(self, message: Message):
msg_type = message.get_type()
for observer in self._observers:
observer.receive_message(msg_type, message)
def run_worker(rank, world_size):
r"""
A wrapper function that initializes RPC, calls the function, and shuts down
RPC.
"""
if rank == 1:
com_manager_client = TRPCCommManager(
"./trpc_master_config.csv", rank, world_size
)
start = time.time()
tensor = torch.ones(1000, 1000)
message = Message(type="test", sender_id=rank, receiver_id="1")
message.add_params("THE_TENSOR", tensor)
TRPCCOMMServicer.sendMessage("worker0", message)
message_values = []
message = Message(type="test", sender_id=rank, receiver_id="1")
message2 = Message(type="test", sender_id=rank, receiver_id="1")
message.add_params("THE_TENSOR", tensor)
for i in range(100):
print("###############################")
print("Measuring for Single Message")
for size in [100, 1000, 10000]:
# for size in [100, 1000]:
print(f"======= size = {size} =====")
tensor = torch.ones(size, size)
start = time.time()
TRPCCOMMServicer.sendMessageTest1("worker0", message)
end = time.time()
duration = end - start
message_values.append(duration)
# print(f"Message tensor size={size} duration={str(duration)}", flush=True)
print("###############################")
print("Measuring for Message with separate Tensor")
sinle_tensor_values = []
start = time.time()
for size in [100, 1000, 10000]:
# for size in [100, 1000]:
print(f"======= size = {size} =====")
tensor = torch.ones(size, size)
# message = Message(type="test", sender_id=rank, receiver_id="1")
# message.add_params("THE_TENSOR", tensor)
start = time.time()
TRPCCOMMServicer.sendMessageTest2(
"worker0", message2.get_params(), tensor
)
end = time.time()
duration = end - start
# print(f"Single tensor size={size} duration={str(duration)}", flush=True)
sinle_tensor_values.append(duration)
print(
"mean message: "
+ str(decimal.Decimal(sum(message_values) / len(message_values)))
)
print(
"mean single tensor: "
+ str(decimal.Decimal(sum(sinle_tensor_values) / len(sinle_tensor_values)))
)
# ret = rpc.rpc_sync("worker1", TRPCCOMMServicer., args=(torch.ones(2), torch.ones(2)))
else:
# parameter server does nothing
com_manager_client = TRPCCommManager(
"./trpc_master_config.csv", rank, world_size
)
rpc.shutdown()
if __name__ == "__main__":
world_size = 2
# run_worker(0,1)
mp.spawn(run_worker, args=(world_size,), nprocs=world_size, join=True)
| [
"torch.distributed.rpc.TensorPipeRpcBackendOptions",
"torch.multiprocessing.spawn",
"torch.distributed.rpc.ProcessGroupRpcBackendOptions",
"torch.ones",
"torch.distributed.rpc.shutdown"
] | 1.13.1 | eliaskousk/FedML | e30d5dd3cc84c8a369c828a6f6ef097b3cf67b1a |
1.3 | # General structure from https://github.com/pytorch/examples/blob/master/mnist/main.py
from __future__ import print_function
import argparse
import os
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import CosineAnnealingLR
import torch.autograd as autograd
args = None
class GetSubnet(autograd.Function):
@staticmethod
def forward(ctx, scores, k):
# Get the supermask by sorting the scores and using the top k%
out = scores.clone()
_, idx = scores.flatten().sort()
j = int((1 - k) * scores.numel())
# flat_out and out access the same memory.
flat_out = out.flatten()
flat_out[idx[:j]] = 0
flat_out[idx[j:]] = 1
return out
@staticmethod
def backward(ctx, g):
# send the gradient g straight-through on the backward pass.
return g, None
class SupermaskConv(nn.Conv2d):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# initialize the scores
self.scores = nn.Parameter(torch.Tensor(self.weight.size()))
nn.init.kaiming_uniform_(self.scores, a=math.sqrt(5))
# NOTE: initialize the weights like this.
nn.init.kaiming_normal_(self.weight, mode="fan_in", nonlinearity="relu")
# NOTE: turn the gradient on the weights off
self.weight.requires_grad = False
def forward(self, x):
subnet = GetSubnet.apply(self.scores.abs(), args.sparsity)
w = self.weight * subnet
x = F.conv2d(
x, w, self.bias, self.stride, self.padding, self.dilation, self.groups
)
return x
class SupermaskLinear(nn.Linear):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# initialize the scores
self.scores = nn.Parameter(torch.Tensor(self.weight.size()))
nn.init.kaiming_uniform_(self.scores, a=math.sqrt(5))
# NOTE: initialize the weights like this.
nn.init.kaiming_normal_(self.weight, mode="fan_in", nonlinearity="relu")
# NOTE: turn the gradient on the weights off
self.weight.requires_grad = False
def forward(self, x):
subnet = GetSubnet.apply(self.scores.abs(), args.sparsity)
w = self.weight * subnet
return F.linear(x, w, self.bias)
return x
# NOTE: not used here but we use NON-AFFINE Normalization!
# So there is no learned parameters for your nomralization layer.
class NonAffineBatchNorm(nn.BatchNorm2d):
def __init__(self, dim):
super(NonAffineBatchNorm, self).__init__(dim, affine=False)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = SupermaskConv(1, 32, 3, 1, bias=False)
self.conv2 = SupermaskConv(32, 64, 3, 1, bias=False)
self.dropout1 = nn.Dropout2d(0.25)
self.dropout2 = nn.Dropout2d(0.5)
self.fc1 = SupermaskLinear(9216, 128, bias=False)
self.fc2 = SupermaskLinear(128, 10, bias=False)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
def train(model, device, train_loader, optimizer, criterion, epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
def test(model, device, criterion, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += criterion(output, target)
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
def main():
global args
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=14, metavar='N',
help='number of epochs to train (default: 14)')
parser.add_argument('--lr', type=float, default=0.1, metavar='LR',
help='learning rate (default: 0.1)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='Momentum (default: 0.9)')
parser.add_argument('--wd', type=float, default=0.0005, metavar='M',
help='Weight decay (default: 0.0005)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--save-model', action='store_true', default=False,
help='For Saving the current Model')
parser.add_argument('--data', type=str, default='../data', help='Location to store data')
parser.add_argument('--sparsity', type=float, default=0.5,
help='how sparse is each layer')
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
print (device)
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(os.path.join(args.data, 'mnist'), train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST(os.path.join(args.data, 'mnist'), train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.test_batch_size, shuffle=True, **kwargs)
model = Net().to(device)
# NOTE: only pass the parameters where p.requires_grad == True to the optimizer! Important!
optimizer = optim.SGD(
[p for p in model.parameters() if p.requires_grad],
lr=args.lr,
momentum=args.momentum,
weight_decay=args.wd,
)
criterion = nn.CrossEntropyLoss().to(device)
scheduler = CosineAnnealingLR(optimizer, T_max=args.epochs)
for epoch in range(1, args.epochs + 1):
train(model, device, train_loader, optimizer, criterion, epoch)
test(model, device, criterion, test_loader)
scheduler.step()
if args.save_model:
torch.save(model.state_dict(), "mnist_cnn.pt")
if __name__ == '__main__':
main()
| [
"torch.device",
"torch.flatten",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.no_grad",
"torch.nn.init.kaiming_normal_",
"torch.nn.functional.log_softmax",
"torch.manual_seed",
"torch.nn.Dropout2d",
"torch.nn.functional.linear",
"torch.cuda.is_available",
"torch.nn.functional.relu",
"torch.nn.functional.max_pool2d",
"torch.nn.functional.conv2d",
"torch.nn.CrossEntropyLoss"
] | 1.3.0 | weizhonz/hid | 3ee3aeeaf12baeadf3d85c1bb86296073bba3fbe |
1.6 | import dataclasses
import itertools
from typing import List, Optional, Tuple
import nltk
import torch
from .downloader import load_trained_model
from ..parse_base import BaseParser, BaseInputExample
from ..ptb_unescape import ptb_unescape, guess_space_after
TOKENIZER_LOOKUP = {
"en": "english",
"de": "german",
"fr": "french",
"pl": "polish",
"sv": "swedish",
}
LANGUAGE_GUESS = {
"ar": ("X", "XP", "WHADVP", "WHNP", "WHPP"),
"zh": ("VSB", "VRD", "VPT", "VNV"),
"en": ("WHNP", "WHADJP", "SINV", "SQ"),
"de": ("AA", "AP", "CCP", "CH", "CNP", "VZ"),
"fr": ("P+", "P+D+", "PRO+", "PROREL+"),
"he": ("PREDP", "SYN_REL", "SYN_yyDOT"),
"pl": ("formaczas", "znakkonca"),
"sv": ("PSEUDO", "AVP", "XP"),
}
def guess_language(label_vocab):
"""Guess parser language based on its syntactic label inventory.
The parser training scripts are designed to accept arbitrary input tree
files with minimal language-specific behavior, but at inference time we may
need to know the language identity in order to invoke other pipeline
elements, such as tokenizers.
"""
for language, required_labels in LANGUAGE_GUESS.items():
if all(label in label_vocab for label in required_labels):
return language
return None
@dataclasses.dataclass
class InputSentence(BaseInputExample):
"""Parser input for a single sentence.
At least one of `words` and `escaped_words` is required for each input
sentence. The remaining fields are optional: the parser will attempt to
derive the value for any missing fields using the fields that are provided.
`words` and `space_after` together form a reversible tokenization of the
input text: they represent, respectively, the Unicode text for each word and
an indicator for whether the word is followed by whitespace. These are used
as inputs by the parser.
`tags` is a list of part-of-speech tags, if available prior to running the
parser. The parser does not actually use these tags as input, but it will
pass them through to its output. If `tags` is None, the parser will perform
its own part of speech tagging (if the parser was not trained to also do
tagging, "UNK" part-of-speech tags will be used in the output instead).
`escaped_words` are the representations of each leaf to use in the output
tree. If `words` is provided, `escaped_words` will not be used by the neural
network portion of the parser, and will only be incorporated when
constructing the output tree. Therefore, `escaped_words` may be used to
accommodate any dataset-specific text encoding, such as transliteration.
Here is an example of the differences between these fields for English PTB:
(raw text): "Fly safely."
words: " Fly safely . "
space_after: False True False False False
tags: `` VB RB . ''
escaped_words: `` Fly safely . ''
"""
words: Optional[List[str]] = None
space_after: Optional[List[bool]] = None
tags: Optional[List[str]] = None
escaped_words: Optional[List[str]] = None
@property
def tree(self):
return None
def leaves(self):
return self.escaped_words
def pos(self):
if self.tags is not None:
return list(zip(self.escaped_words, self.tags))
else:
return [(word, "UNK") for word in self.escaped_words]
class Parser:
"""Berkeley Neural Parser (benepar), integrated with NLTK.
Use this class to apply the Berkeley Neural Parser to pre-tokenized datasets
and treebanks, or when integrating the parser into an NLP pipeline that
already performs tokenization, sentence splitting, and (optionally)
part-of-speech tagging. For parsing starting with raw text, it is strongly
encouraged that you use spaCy and benepar.BeneparComponent instead.
Sample usage:
>>> parser = benepar.Parser("benepar_en3")
>>> input_sentence = benepar.InputSentence(
words=['"', 'Fly', 'safely', '.', '"'],
space_after=[False, True, False, False, False],
tags=['``', 'VB', 'RB', '.', "''"],
escaped_words=['``', 'Fly', 'safely', '.', "''"],
)
>>> parser.parse(input_sentence)
Not all fields of benepar.InputSentence are required, but at least one of
`words` and `escaped_words` must not be None. The parser will attempt to
guess the value for missing fields. For example,
>>> input_sentence = benepar.InputSentence(
words=['"', 'Fly', 'safely', '.', '"'],
)
>>> parser.parse(input_sentence)
Although this class is primarily designed for use with data that has already
been tokenized, to help with interactive use and debugging it also accepts
simple text string inputs. However, using this class to parse from raw text
is STRONGLY DISCOURAGED for any application where parsing accuracy matters.
When parsing from raw text, use spaCy and benepar.BeneparComponent instead.
The reason is that parser models do not ship with a tokenizer or sentence
splitter, and some models may not include a part-of-speech tagger either. A
toolkit must be used to fill in these pipeline components, and spaCy
outperforms NLTK in all of these areas (sometimes by a large margin).
>>> parser.parse('"Fly safely."') # For debugging/interactive use only.
"""
def __init__(self, name, batch_size=64, language_code=None):
"""Load a trained parser model.
Args:
name (str): Model name, or path to pytorch saved model
batch_size (int): Maximum number of sentences to process per batch
language_code (str, optional): language code for the parser (e.g.
'en', 'he', 'zh', etc). Our official trained models will set
this automatically, so this argument is only needed if training
on new languages or treebanks.
"""
self._parser = load_trained_model(name)
if torch.cuda.is_available():
self._parser.cuda()
if language_code is not None:
self._language_code = language_code
else:
self._language_code = guess_language(self._parser.config["label_vocab"])
self._tokenizer_lang = TOKENIZER_LOOKUP.get(self._language_code, None)
self.batch_size = batch_size
def parse(self, sentence):
"""Parse a single sentence
Args:
sentence (InputSentence or List[str] or str): Sentence to parse.
If the input is of List[str], it is assumed to be a sequence of
words and will behave the same as only setting the `words` field
of InputSentence. If the input is of type str, the sentence will
be tokenized using the default NLTK tokenizer (not recommended:
if parsing from raw text, use spaCy and benepar.BeneparComponent
instead).
Returns:
nltk.Tree
"""
return list(self.parse_sents([sentence]))[0]
def parse_sents(self, sents):
"""Parse multiple sentences in batches.
Args:
sents (Iterable[InputSentence]): An iterable of sentences to be
parsed. `sents` may also be a string, in which case it will be
segmented into sentences using the default NLTK sentence
splitter (not recommended: if parsing from raw text, use spaCy
and benepar.BeneparComponent instead). Otherwise, each element
of `sents` will be treated as a sentence. The elements of
`sents` may also be List[str] or str: see Parser.parse() for
documentation regarding these cases.
Yields:
nltk.Tree objects, one per input sentence.
"""
if isinstance(sents, str):
if self._tokenizer_lang is None:
raise ValueError(
"No tokenizer available for this language. "
"Please split into individual sentences and tokens "
"before calling the parser."
)
sents = nltk.sent_tokenize(sents, self._tokenizer_lang)
end_sentinel = object()
for batch_sents in itertools.zip_longest(
*([iter(sents)] * self.batch_size), fillvalue=end_sentinel
):
batch_inputs = []
for sent in batch_sents:
if sent is end_sentinel:
break
elif isinstance(sent, str):
if self._tokenizer_lang is None:
raise ValueError(
"No word tokenizer available for this language. "
"Please tokenize before calling the parser."
)
escaped_words = nltk.word_tokenize(sent, self._tokenizer_lang)
sent = InputSentence(escaped_words=escaped_words)
elif isinstance(sent, (list, tuple)):
sent = InputSentence(words=sent)
elif not isinstance(sent, InputSentence):
raise ValueError(
"Sentences must be one of: InputSentence, list, tuple, or str"
)
batch_inputs.append(self._with_missing_fields_filled(sent))
for inp, output in zip(
batch_inputs, self._parser.parse(batch_inputs, return_compressed=True)
):
# If pos tags are provided as input, ignore any tags predicted
# by the parser.
if inp.tags is not None:
output = output.without_predicted_tags()
yield output.to_tree(
inp.pos(),
self._parser.decoder.label_from_index,
self._parser.tag_from_index,
)
def _with_missing_fields_filled(self, sent):
if not isinstance(sent, InputSentence):
raise ValueError("Input is not an instance of InputSentence")
if sent.words is None and sent.escaped_words is None:
raise ValueError("At least one of words or escaped_words is required")
elif sent.words is None:
sent = dataclasses.replace(sent, words=ptb_unescape(sent.escaped_words))
elif sent.escaped_words is None:
escaped_words = [
word.replace("(", "-LRB-")
.replace(")", "-RRB-")
.replace("{", "-LCB-")
.replace("}", "-RCB-")
.replace("[", "-LSB-")
.replace("]", "-RSB-")
for word in sent.words
]
sent = dataclasses.replace(sent, escaped_words=escaped_words)
else:
if len(sent.words) != len(sent.escaped_words):
raise ValueError(
f"Length of words ({len(sent.words)}) does not match "
f"escaped_words ({len(sent.escaped_words)})"
)
if sent.space_after is None:
if self._language_code == "zh":
space_after = [False for _ in sent.words]
elif self._language_code in ("ar", "he"):
space_after = [True for _ in sent.words]
else:
space_after = guess_space_after(sent.words)
sent = dataclasses.replace(sent, space_after=space_after)
elif len(sent.words) != len(sent.space_after):
raise ValueError(
f"Length of words ({len(sent.words)}) does not match "
f"space_after ({len(sent.space_after)})"
)
assert len(sent.words) == len(sent.escaped_words) == len(sent.space_after)
return sent
| [
"torch.cuda.is_available"
] | 1.6.0 | thomaslu2000/Incremental-Parsing-Representations | 1b0ec638e85f0e521a12b53d8b309191c40fe0d3 |
1.5 | # Copyright Contributors to the Pyro project.
# Copyright (c) 2020, YosefLab.
# SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
"""
The data preprocessing code in this script is adapted from:
https://github.com/YosefLab/scvi-tutorials/blob/50dd3269abfe0c375ec47114f2c20725a016736f/seed_labeling.ipynb
"""
import math
import numpy as np
from scipy import sparse
import torch
import torch.nn as nn
class BatchDataLoader(object):
"""
This custom DataLoader serves mini-batches that are either fully-observed (i.e. labeled)
or partially-observed (i.e. unlabeled) but never mixed.
"""
def __init__(self, data_x, data_y, batch_size, num_classes=4, missing_label=-1):
super().__init__()
self.data_x = data_x
self.data_y = data_y
self.batch_size = batch_size
self.num_classes = num_classes
self.unlabeled = torch.where(data_y == missing_label)[0]
self.num_unlabeled = self.unlabeled.size(0)
self.num_unlabeled_batches = math.ceil(self.num_unlabeled / self.batch_size)
self.labeled = torch.where(data_y != missing_label)[0]
self.num_labeled = self.labeled.size(0)
self.num_labeled_batches = math.ceil(self.num_labeled / self.batch_size)
assert self.data_x.size(0) == self.data_y.size(0)
assert len(self) > 0
@property
def size(self):
return self.data_x.size(0)
def __len__(self):
return self.num_unlabeled_batches + self.num_labeled_batches
def _sample_batch_indices(self):
batch_order = torch.randperm(len(self)).tolist()
unlabeled_idx = self.unlabeled[torch.randperm(self.num_unlabeled)]
labeled_idx = self.labeled[torch.randperm(self.num_labeled)]
slices = []
for i in range(self.num_unlabeled_batches):
_slice = unlabeled_idx[i * self.batch_size: (i + 1) * self.batch_size]
slices.append((_slice, False))
for i in range(self.num_labeled_batches):
_slice = labeled_idx[i * self.batch_size: (i + 1) * self.batch_size]
slices.append((_slice, True))
return slices, batch_order
def __iter__(self):
slices, batch_order = self._sample_batch_indices()
for i in range(len(batch_order)):
_slice = slices[batch_order[i]]
if _slice[1]:
# labeled
yield self.data_x[_slice[0]], \
nn.functional.one_hot(self.data_y[_slice[0]], num_classes=self.num_classes)
else:
# unlabeled
yield self.data_x[_slice[0]], None
def _get_score(normalized_adata, gene_set):
"""
Returns the score per cell given a dictionary of + and - genes
"""
score = np.zeros(normalized_adata.n_obs)
for gene in gene_set['positive']:
expression = np.array(normalized_adata[:, gene].X)
score += expression.flatten()
for gene in gene_set['negative']:
expression = np.array(normalized_adata[:, gene].X)
score -= expression.flatten()
return score
def _get_cell_mask(normalized_adata, gene_set):
"""
Calculates the score per cell for a list of genes, then returns a mask for
the cells with the highest 50 scores.
"""
score = _get_score(normalized_adata, gene_set)
cell_idx = score.argsort()[-50:]
mask = np.zeros(normalized_adata.n_obs)
mask[cell_idx] = 1
return mask.astype(bool)
def get_data(dataset="pbmc", batch_size=100, cuda=False):
"""
Does the necessary preprocessing and returns a BatchDataLoader for the PBMC dataset.
"""
assert dataset in ['pbmc', 'mock']
# create mock dataset for CI
if dataset == 'mock':
num_genes = 17
num_data = 200
X = torch.distributions.Poisson(rate=10.0).sample(sample_shape=(num_data, num_genes))
Y = torch.zeros(num_data, dtype=torch.long)
Y[50:100] = 1
Y[100:] = -1
if cuda:
X, Y = X.cuda(), Y.cuda()
return BatchDataLoader(X, Y, batch_size), num_genes, 2.0, 1.0, None
import scvi
import scanpy as sc
adata = scvi.data.purified_pbmc_dataset(subset_datasets=["regulatory_t", "naive_t",
"memory_t", "naive_cytotoxic"])
gene_subset = ["CD4", "FOXP3", "TNFRSF18", "IL2RA", "CTLA4", "CD44", "TCF7",
"CD8B", "CCR7", "CD69", "PTPRC", "S100A4"]
normalized = adata.copy()
sc.pp.normalize_total(normalized, target_sum=1e4)
sc.pp.log1p(normalized)
normalized = normalized[:, gene_subset].copy()
sc.pp.scale(normalized)
# hand curated list of genes for identifying ground truth
cd4_reg_geneset = {"positive": ["TNFRSF18", "CTLA4", "FOXP3", "IL2RA"], "negative": ["S100A4", "PTPRC", "CD8B"]}
cd8_naive_geneset = {"positive": ["CD8B", "CCR7"], "negative": ["CD4"]}
cd4_naive_geneset = {"positive": ["CCR7", "CD4"], "negative": ["S100A4", "PTPRC", "FOXP3", "IL2RA", "CD69"]}
cd4_mem_geneset = {"positive": ["S100A4"], "negative": ["IL2RA", "FOXP3", "TNFRSF18", "CCR7"]}
cd4_reg_mask = _get_cell_mask(normalized, cd4_reg_geneset)
cd8_naive_mask = _get_cell_mask(normalized, cd8_naive_geneset)
cd4_naive_mask = _get_cell_mask(normalized, cd4_naive_geneset)
cd4_mem_mask = _get_cell_mask(normalized, cd4_mem_geneset)
# these will be our seed labels
seed_labels = -np.ones(cd4_mem_mask.shape[0])
seed_labels[cd8_naive_mask] = 0 # "CD8 Naive T cell"
seed_labels[cd4_naive_mask] = 1 # "CD4 Naive T cell"
seed_labels[cd4_mem_mask] = 2 # "CD4 Memory T cell"
seed_labels[cd4_reg_mask] = 3 # "CD4 Regulatory T cell"
# this metadata will be used for plotting
seed_colors = ['lightgray'] * seed_labels.shape[0]
seed_sizes = [0.05] * seed_labels.shape[0]
for i in range(len(seed_colors)):
if seed_labels[i] == 0:
seed_colors[i] = 'lightcoral'
elif seed_labels[i] == 1:
seed_colors[i] = 'limegreen'
elif seed_labels[i] == 2:
seed_colors[i] = 'deepskyblue'
elif seed_labels[i] == 3:
seed_colors[i] = 'mediumorchid'
if seed_labels[i] != -1:
seed_sizes[i] = 25
adata.obs['seed_labels'] = seed_labels
adata.obs['seed_colors'] = seed_colors
adata.obs['seed_marker_sizes'] = seed_sizes
# filter out non-variable genes
adata_filter = adata.copy()
sc.pp.normalize_per_cell(adata_filter, counts_per_cell_after=1e4)
sc.pp.log1p(adata_filter)
sc.pp.highly_variable_genes(adata_filter, min_mean=0.0125, max_mean=3.0, min_disp=0.5)
highly_variable_genes = adata_filter.var["highly_variable"]
Y = torch.from_numpy(seed_labels).long()
X = torch.from_numpy(sparse.csr_matrix.todense(adata.X)).float()
# the prior mean and scale for the log count latent variable `l`
# is set using the empirical mean and variance of the observed log counts
log_counts = X.sum(-1).log()
l_mean, l_scale = log_counts.mean().item(), log_counts.std().item()
if cuda:
X, Y = X.cuda(), Y.cuda()
# subsample and remove ~50% of the unlabeled cells
labeled = torch.where(Y != -1)[0]
unlabeled = torch.where(Y == -1)[0]
unlabeled = unlabeled[torch.randperm(unlabeled.size(0))[:19800]]
idx = torch.cat([labeled, unlabeled])
num_genes = X.size(-1)
adata = adata[idx.data.cpu().numpy(), highly_variable_genes]
adata.raw = adata
return BatchDataLoader(X[idx], Y[idx], batch_size), num_genes, l_mean, l_scale, adata
| [
"torch.zeros",
"torch.cat",
"torch.nn.functional.one_hot",
"torch.randperm",
"torch.from_numpy",
"torch.distributions.Poisson",
"torch.where"
] | 1.5.0 | akihironitta/pyro | 0ab6e474330942ff4ec2a87a6cc0c671943fc5cd |
1.9 | import os
import glob
import random
import cv2
import numpy as np
import torch
import matplotlib.pyplot as plt
import open3d
from skimage import io, img_as_float32
from scipy import ndimage
from torch_geometric.data import Data, DataListLoader
from torch_geometric.loader import DataLoader as GraphLevelDataLoader
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
import transform
from typing import List
from easydict import EasyDict
from utils import math_utils, data_utils, unit_tests
class ImageGraphTextureDataSet(Dataset):
def __init__(self, image_files, end_level, is_train, benchmark, img_size, crop_half_width, circle_radius, num_circles=4, max_items=None,
no_train_cropped=False, transform=None, random_mask=False):
self._is_train = is_train
self._benchmark = benchmark
self.img_size = img_size
self.crop_half_width = crop_half_width
self._end_level = end_level
self._transform = transform
self._no_train_cropped = no_train_cropped
self.image_files = np.array(image_files)
self.random_mask = random_mask
self.circle_radius = circle_radius
self.num_circles = num_circles
self.circle = torch.zeros((self.circle_radius * 2, self.circle_radius * 2, 1), dtype=torch.bool)
for row in range(self.circle.shape[0]):
for col in range(self.circle.shape[1]):
if abs(row - self.circle_radius) ** 2 + abs(col - self.circle_radius) ** 2 <= self.circle_radius ** 2:
self.circle[row, col] = True
self.traces_list = []
self.edge_indices_list = []
self.num_vertices_list = []
self.decimation = 2
# Build fake traces
for level in range(self._end_level):
level_img_size = self.img_size // (self.decimation ** level)
num_verties = level_img_size ** 2
self.num_vertices_list.append(num_verties)
if level > 0:
trace = np.arange(num_verties).reshape(level_img_size, level_img_size)
trace = np.repeat(trace, self.decimation, axis=1).repeat(self.decimation, axis=0)
trace = np.reshape(trace, (-1,))
#trace = torch.from_numpy(trace)
#trace = torch.cat((trace, trace + level_img_size * level_img_size), dim=0)
print(level, 'Trace shape:', trace.shape)
self.traces_list.append(trace)
# Build fake decimated edges
for level in range(self._end_level):
level_img_size = self.img_size // (self.decimation ** level)
edge_indices = self._generate_image_graph_edges(level_img_size)
#edge_indices = torch.from_numpy(edge_indices)
#edge_indices = torch.cat((edge_indices, edge_indices + level_img_size * level_img_size), dim=0)
#edge_indices = edge_indices.t().contiguous()
print(level, 'Number of edge indices:', edge_indices.shape)
self.edge_indices_list.append(edge_indices)
def _generate_image_graph_edges(self, img_size):
def double_set_add(s, a, b):
s.add((a, b))
s.add((b, a))
def get_neighbor_coords_list(r, c, max_size):
coords_list = []
# TODO: Should we add self-loops?
# Maybe not since most graph algorithms explicitly include the vertex they're operating on
#coords_list.append((r, c))
if r > 0:
coords_list.append((r - 1, c + 0))
#if c > 0:
# coords_list.append((r - 1, c - 1))
#if c < max_size - 1:
# coords_list.append((r - 1, c + 1))
if c > 0:
coords_list.append((r + 0, c - 1))
if c < max_size - 1:
coords_list.append((r + 0, c + 1))
if r < max_size - 1:
coords_list.append((r + 1, c + 0))
#if c > 0:
# coords_list.append((r + 1, c - 1))
#if c < max_size - 1:
# coords_list.append((r + 1, c + 1))
return coords_list
edge_indices = set()
for r in range(img_size):
for c in range(img_size):
index = r * img_size + c
neighbor_coords = get_neighbor_coords_list(r, c, img_size)
for neighbor_coord in neighbor_coords:
neighbor_index = neighbor_coord[0] * img_size + neighbor_coord[1]
double_set_add(edge_indices, index, neighbor_index)
edge_indices = np.asarray(list(edge_indices))
return edge_indices
def __len__(self):
return len(self.image_files)
def __getitem__(self, index: int):
img_path = self.image_files[index]
img = io.imread(img_path)
img = np.array(img)
sample = {'color': img}
if self._transform:
sample = self._transform(sample)
img = sample['color']
# Create circular masks
mask = torch.zeros((self.img_size, self.img_size, 1), dtype=torch.bool)
for i in range(self.num_circles):
if self._is_train and self.random_mask:
x_offset = int((self.img_size / 2 - self.crop_half_width) * (random.random() * 2.0 - 1.0) * 0.95)
y_offset = int((self.img_size / 2 - self.crop_half_width) * (random.random() * 2.0 - 1.0) * 0.95)
else:
x_offset = ((i % 2) * 2 - 1) * self.img_size // 4
y_offset = ((i // 2) * 2 - 1) * self.img_size // 4
row_start = self.img_size//2-self.circle_radius + x_offset
row_end = self.img_size//2+self.circle_radius + x_offset
col_start = self.img_size//2-self.circle_radius + y_offset
col_end = self.img_size//2+self.circle_radius + y_offset
mask[row_start:row_end, col_start:col_end] += self.circle
img = torch.reshape(img, (-1, 3))
mask = torch.reshape(mask, (-1, 1))
sample = data_utils.HierarchicalData(x=torch.cat([img * ~mask, mask], dim=-1),
color=img,
mask=mask,
edge_index=torch.from_numpy(self.edge_indices_list[0]).t().contiguous(),
#num_vertices=self.num_vertices_list,
)
##sample.num_vertices = torch.tensor(self.num_vertices_list)
num_vertices = [sample.x.shape[0]]
sample.num_vertices = torch.tensor(self.num_vertices_list, dtype=torch.int)
for level in range(1, self._end_level):
setattr(sample, f"hierarchy_edge_index_{level}", torch.from_numpy(self.edge_indices_list[level]).t().contiguous())
setattr(sample, f"hierarchy_trace_index_{level}", torch.from_numpy(self.traces_list[level - 1]))
num_vertices.append(int(sample[f"hierarchy_trace_index_{level}"].max() + 1))
sample.num_vertices = torch.tensor(num_vertices, dtype=torch.int)
return sample
class Normalize(object):
"""Normalize color images between [-1,1]."""
def __call__(self, sample):
color_image = sample['color']
# NOTE: Don't normalize input_image. It's just a matrix of coordinates
color_image = img_as_float32(color_image)
color_image = (color_image * 2.0) - 1
#color_image = color_image - 0.5
return {'color': color_image}
class Rescale(object):
"""Rescale the image in a sample to a given size.
Args:
output_size (tuple or int): Desired output size. If tuple, output is
matched to output_size. If int, smaller of image edges is matched
to output_size keeping aspect ratio the same.
"""
def __init__(self, min_size, max_size):
# For now size is defined as the smaller size of an image
assert isinstance(min_size, int)
assert isinstance(max_size, int)
assert min_size <= max_size
self.min_size = min_size
self.max_size = max_size
def __call__(self, sample):
input_image = sample['color']
h, w = input_image.shape[:2]
output_size = np.random.randint(self.min_size, self.max_size + 1)
if isinstance(output_size, int):
if h > w:
new_h, new_w = output_size * h / w, output_size
else:
new_h, new_w = output_size, output_size * w / h
else:
new_h, new_w = output_size
new_h, new_w = int(new_h), int(new_w)
# TODO: Use pillow library for resizing images
# Nearest neighbor for input_image since we can't interpolate across discontinuities in uv coordinates
#input_image = transform.resize(input_image, (new_h, new_w))
#input_image = cv2.resize(input_image, (new_w, new_h), interpolation=cv2.INTER_NEAREST)
input_image = cv2.resize(input_image, (new_w, new_h), interpolation=cv2.INTER_AREA)
return {'color': input_image}
class CenterCrop(object):
def __init__(self, crop_size):
assert isinstance(crop_size, tuple)
self.crop_size = crop_size
def __call__(self, sample):
input_image = sample['color']
# Assuming input_image and color_image are the same shape
h, w, _ = input_image.shape
size_crop_h, size_crop_w = self.crop_size
# Get a valid starting and end positions
h_start = int((h - size_crop_h) / 2)
w_start = int((w - size_crop_w) / 2)
h_end = h_start + size_crop_h
w_end = w_start + size_crop_w
# Crop the input and target
input_image = input_image[h_start:h_end, w_start:w_end, :]
return {'color': input_image}
class RandomRotation(object):
def __init__(self):
self.angles = [0, 90, 180, 270]
def __call__(self, sample):
input_image = sample['color']
angle = random.choice(self.angles)
input_image = ndimage.rotate(input_image, angle, reshape=False, mode='constant')
return {'color': input_image}
class RandomFlip(object):
def __init__(self, flip_axis):
self.flip_axis = flip_axis
def __call__(self, sample):
input_image = sample['color']
if np.random.choice(a=[False, True]):
input_image = np.flip(input_image, axis=self.flip_axis).copy()
return {'color': input_image}
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample):
input_image = sample['color']
# NOTE: Axis swapping is not necessary for uv coords since
# it is not an image, but rather a matrix of coordinates
# swap color axis because
# numpy image: H x W x C
# torch image: C X H X W
#input_image = input_image.transpose((2, 0, 1))
return {'color': torch.from_numpy(input_image)}
class ImageGraphTextureDataLoader:
def __init__(self, config, multi_gpu):
self.config = EasyDict(config)
self.train_files = self._load(os.path.join(self.config.root_dir, 'train'))
self.val_files = self._load(os.path.join(self.config.root_dir, 'val'))
len_train_files, len_val_files = len(self.train_files), len(self.val_files)
total_num_files = len_train_files + len_val_files
frac_train_files = len_train_files / total_num_files
if 0 <= self.config.max_items <= total_num_files:
max_train_files = int(self.config.max_items * frac_train_files)
max_val_files = int(self.config.max_items * (1 - frac_train_files))
else:
max_train_files = int(total_num_files * frac_train_files)
max_val_files = int(total_num_files * (1 - frac_train_files))
self.train_files = self.train_files[:max_train_files]
self.val_files = self.val_files[:max_val_files]
transf_list_train = [
Normalize(),
Rescale(self.config.img_size, self.config.img_size),
CenterCrop((self.config.img_size, self.config.img_size)),
]
if self.config.random_augmentation:
transf_list_train += [
RandomRotation(),
RandomFlip(flip_axis=1),
]
transf_list_train.append(ToTensor())
# Build val/test transformation
transf_list_valid = [
Normalize(),
Rescale(self.config.img_size, self.config.img_size),
CenterCrop((self.config.img_size, self.config.img_size)),
#RandomFlip(flip_axis=1),
ToTensor()
]
transf_train = transforms.Compose(transf_list_train)
transf_valid = transforms.Compose(transf_list_valid)
if multi_gpu:
dataloader_class = DataListLoader
else:
dataloader_class = GraphLevelDataLoader
self.train_dataset = ImageGraphTextureDataSet(self.train_files, self.config.end_level, is_train=True,
circle_radius=self.config.circle_radius,
transform=transf_train, random_mask=self.config.random_mask,
no_train_cropped=self.config.no_train_cropped, benchmark=False,
img_size=self.config.img_size, max_items=self.config.max_items,
crop_half_width=self.config.crop_half_width)
print('train dataset len', len(self.train_dataset))
self.train_loader = dataloader_class(self.train_dataset, batch_size=self.config.train_batch_size,
shuffle=True, pin_memory=True, persistent_workers=self.config.num_workers > 0,
num_workers=self.config.num_workers)
self.sample_train_loader = dataloader_class(self.train_dataset, batch_size=self.config.train_batch_size,
shuffle=False, pin_memory=True,
num_workers=self.config.num_workers)
self.sample_train_dataset = torch.utils.data.Subset(self.train_dataset,
np.arange(min(self.config.num_static_samples,
len(self.train_dataset))))
self.sample_train_loader = dataloader_class(self.sample_train_dataset, batch_size=self.config.train_batch_size,
shuffle=False, pin_memory=True,
num_workers=self.config.num_workers)
# TODO: Update val dataset so that it doesn't have to be treated like a train dataset
# includes is_train=False and no_train_cropped=self.config.no_train_cropped
self.val_dataset = ImageGraphTextureDataSet(self.val_files, self.config.end_level, is_train=False,
circle_radius=self.config.circle_radius,
transform=transf_valid, benchmark=False,
no_train_cropped=self.config.no_train_cropped,
img_size=self.config.img_size, max_items=self.config.max_items,
crop_half_width=self.config.crop_half_width)
print('val dataset len', len(self.val_dataset))
#unit_tests.compare_train_val(self.train_colors, self.val_colors)
self.val_loader = dataloader_class(self.val_dataset, batch_size=self.config.test_batch_size, shuffle=False,
pin_memory=True, persistent_workers=self.config.num_workers > 0,
num_workers=self.config.num_workers)
self.sample_val_dataset = torch.utils.data.Subset(self.val_dataset,
np.arange(min(self.config.num_static_samples,
len(self.val_dataset))))
self.sample_val_loader = dataloader_class(self.sample_val_dataset, batch_size=self.config.test_batch_size,
shuffle=False, pin_memory=True,
num_workers=self.config.num_workers)
def _load(self, root_dir, seed=42) -> List[str]:
filenames = glob.glob(f"{root_dir}/*.png")
filenames = sorted(filenames)
random.Random(seed).shuffle(filenames)
return filenames | [
"torch.zeros",
"torch.cat",
"torch.from_numpy",
"torch.tensor",
"torch.reshape"
] | 1.9.1 | johnpeterflynn/surface-texture-inpainting-net | b2de05eaa47c9bcca53b9aee12b6012ac2c05156 |
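Each preview row above pairs a PyTorch code sample with the list of `torch` API calls it uses, plus version, repository, and commit metadata. The snippet below is a minimal, hypothetical sketch of loading and filtering the dataset with the Hugging Face `datasets` library; the repository ID, split name, and field names are assumptions inferred from the preview layout, not confirmed values.

```python
# Hypothetical usage sketch -- the repository ID is a placeholder and the
# split/field names are assumed from the preview layout; adjust as needed.
from datasets import load_dataset

ds = load_dataset("your-username/pytorch-code-api-pairs", split="train")  # placeholder repo ID

row = ds[0]
print(row["version"], row["repo_name"], row["hexsha"])  # assumed metadata fields
print(row["apis"])                                      # list of torch API strings used in the sample
print(row["code"][:200])                                # first 200 characters of the code sample

# Example: keep only samples that call torch.distributed.rpc APIs.
rpc_rows = ds.filter(lambda r: any(api.startswith("torch.distributed.rpc") for api in r["apis"]))
print(len(rpc_rows))
```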